Example No. 1
import numpy as np


def ICP_point_to_point(points_set_P, points_set_Q):
    """
    :param points_set_P: 3D point cloud, ndarray of shape 3*N
    :param points_set_Q: 3D point cloud, ndarray of shape 3*N
    :return: transform [R|t] from P to Q, shape (4, 4); the last row is [0, 0, 0, 1]
    Runs up to 20 iterations to find the best transform between neighboring frames.
    """
    iter_times = 20
    pose = FindRigidTransform(points_set_P, points_set_Q)
    ind_P, ind_Q = FindMatchingPairs(points_set_P, points_set_Q, pose)
    matching_num = len(ind_P)
    print('First matching num', matching_num)
    for i in range(iter_times):
        temp_P = points_set_P[:, ind_P]
        temp_Q = points_set_Q[:, ind_Q]
        temp_pose = FindRigidTransform(temp_P, temp_Q)
        # re-associate points using the transform just estimated
        temp_ind_P, temp_ind_Q = FindMatchingPairs(points_set_P, points_set_Q,
                                                   temp_pose)
        temp_matching_num = len(temp_ind_P)
        if temp_matching_num > matching_num:
            pose = temp_pose
            ind_P = temp_ind_P
            ind_Q = temp_ind_Q
            matching_num = temp_matching_num
        else:
            break
    print('pose', pose)
    return np.vstack((pose, np.array([0, 0, 0, 1])))
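
The helpers FindRigidTransform and FindMatchingPairs are not shown. As a sketch of what the first is assumed to do, here is the standard SVD-based (Kabsch) least-squares rigid alignment for matched 3*N point sets; this is an illustration, not the original implementation:

import numpy as np

def FindRigidTransform(P, Q):
    # Least-squares rigid transform: returns the 3x4 matrix [R|t] mapping P onto Q
    p_mean = P.mean(axis=1, keepdims=True)
    q_mean = Q.mean(axis=1, keepdims=True)
    H = (P - p_mean) @ (Q - q_mean).T
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:  # guard against a reflection solution
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    t = q_mean - R @ p_mean
    return np.hstack((R, t))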
Example No. 2

import time

import numpy as np

# apx_val is a helper defined elsewhere in the same module


def randomizedVI(mdp, v0, L, eps, delta, analyze=False):
    m_hist = []

    start_time_x = time.time()
    x = np.zeros((mdp.nb_s, mdp.nb_a))
    for i in range(mdp.nb_s):
        x[i, :] = [mdp.transition[i, a, :].dot(v0) for a in range(mdp.nb_a)]

    if analyze:
        print("{} sec to compute x=p^Tv".format(
            round(time.time() - start_time_x, 4)))

    v_prev = v0.copy()
    for l in range(L):
        start_time_l = time.time()
        v_l, pi_l = apx_val(mdp, v_prev, v0, x, eps, delta / L)
        v_prev = v_l

        if analyze:
            duration_l = time.time() - start_time_l
            m = int(2 * np.max(np.abs(v_prev - v0))**2 / (eps**2) \
                * np.log(2 / delta)) + 1
            print("Iteration l={}, |S||A|*{} iterations of ApxTrans in {} sec".
                  format(l, m, round(duration_l, 4)))
            m_hist.append([m, duration_l])

    return v_l, pi_l, np.array(m_hist)
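
The logged sample count m is a Hoeffding-style bound computed from the current value change. A standalone check of that formula, with illustrative values for eps and delta (both are assumptions):

import numpy as np

v0 = np.zeros(4)
v_prev = np.array([0.5, 1.0, 0.25, 0.75])
eps, delta = 0.1, 0.05
m = int(2 * np.max(np.abs(v_prev - v0))**2 / (eps**2) * np.log(2 / delta)) + 1
print(m)  # number of ApxTrans samples per state-action pair at this iteration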
Example No. 3
import numpy as np
import matplotlib.pyplot as pplot
from scipy import stats

# getconfig, get_irss, get_balances, get_degrees, and load_pickle are
# project-specific helpers; aggregate_id is set elsewhere in the original module.


def degree_distribution_non_failed_nodes():
    config = getconfig(aggregate_id)
    irss = np.array(get_irss(aggregate_id))
    balances, gross_balances = get_balances(aggregate_id)
    degrees = np.array(get_degrees(aggregate_id))
    failed_bank_no = config['failed_bank']
    #giant_component = load_pickle(aggregate_id, 'gc')
    defaulted_nodes = load_pickle(aggregate_id, 'defaulted')
    network = np.array(load_pickle(aggregate_id, 'network'))

    irs_val = config['model']['max_irs_value']
    threshold = config['model']['threshold']
    no_banks = config['model']['no_banks']

    steps = range(len(degrees))
    defnods = set(np.hstack(defaulted_nodes[:-1]))
    ndn = list(set(range(no_banks)) - defnods)
    print(len(ndn))
    non_defaulted_degrees = np.array(degrees[-1])[ndn]

    fig = pplot.figure()
    #ax = fig.add_subplot(211)
    #ax.plot(steps,[len(c) for c in giant_component])

    ax = fig.add_subplot(111)
    ax.hist(non_defaulted_degrees, bins=50, density=True)  # normed was removed from matplotlib; density is the replacement
    mu = np.mean(non_defaulted_degrees)
    std = np.std(non_defaulted_degrees)
    sndd = sorted(non_defaulted_degrees)
    ax.plot(sndd, stats.norm.pdf(sndd, mu, std), marker='o')

    pplot.show()
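
The loaders above (getconfig, get_degrees, load_pickle, ...) are project-specific, so here is a self-contained sketch of the same histogram-plus-normal-fit plot on synthetic degree data; the synthetic distribution is an assumption:

import numpy as np
import matplotlib.pyplot as pplot
from scipy import stats

rng = np.random.default_rng(0)
non_defaulted_degrees = rng.normal(20, 5, 1000)  # stand-in for the surviving nodes' degrees

fig = pplot.figure()
ax = fig.add_subplot(111)
ax.hist(non_defaulted_degrees, bins=50, density=True)
mu = np.mean(non_defaulted_degrees)
std = np.std(non_defaulted_degrees)
sndd = sorted(non_defaulted_degrees)
ax.plot(sndd, stats.norm.pdf(sndd, mu, std), marker='o')
pplot.show()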
Example No. 4
import numpy as np
import pandas as pd


def main():
    # information about one panda: [height, fur, paw, belly]
    un_panda = [100, 5, 20, 80]
    un_panda_numpy = np.array(un_panda)
    un_bb_panda = un_panda_numpy / 2
    print(un_bb_panda)

    mps = pd.read_csv("current_mps.csv", sep=";")
Example No. 5
import math

import numpy as np
import pandas as pd


def calculate(self, filename):
    df = pd.read_csv(filename, header=None)
    df = np.array(df)
    formatdf = np.array2string(
        df, formatter={"float_kind": lambda x: "%.7f" % x})
    height = len(df)
    self.varMatrix = [0] * 11
    A_t = math.pi * (tr**2)  # `tr` is defined elsewhere in the original class
Example No. 6
import numpy as np


def Angle(sels, extra):
    if len(sels) < 3:
        raise ValueError("Angle needs three selections!")
    c1 = np.array(sels[0].atom[0].coord)
    c2 = np.array(sels[1].atom[0].coord)
    c3 = np.array(sels[2].atom[0].coord)
    c1u = unit_vec(c1 - c2)
    c3u = unit_vec(c3 - c2)
    return (1 / 0.0174533) * np.arccos(np.clip(np.dot(c1u, c3u), -1.0,
                                               1.0))  # this is in degrees
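
unit_vec is not shown; a minimal stand-in plus a sanity check on a right angle, using plain coordinates instead of PyMOL-style selection objects:

import numpy as np

def unit_vec(v):
    return v / np.linalg.norm(v)

c1 = np.array([1.0, 0.0, 0.0])
c2 = np.zeros(3)
c3 = np.array([0.0, 1.0, 0.0])
c1u = unit_vec(c1 - c2)
c3u = unit_vec(c3 - c2)
print((1 / 0.0174533) * np.arccos(np.clip(np.dot(c1u, c3u), -1.0, 1.0)))  # ~90.0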
Example No. 7

import math

import numpy as np


def circle_acc(frame, r):
    # Hough-style accumulator for circles of a fixed radius r
    # (`r` was an undefined global in the original; it is made a parameter here)
    height = frame.shape[0]
    width = frame.shape[1]
    frame = np.array(frame)
    [x, y] = np.where(frame >= 225)  # bright (edge) pixels
    accumulator = np.zeros((height, width))
    for t in range(0, 360, 2):
        # Cast each edge pixel to a candidate circle centre
        x0 = (x - r * math.cos(math.radians(t))).astype(int)
        y0 = (y - r * math.sin(math.radians(t))).astype(int)
        votes = np.stack([y0, x0], axis=1)
        valid = ((votes[:, 0] > 0) & (votes[:, 1] > 0) &
                 (votes[:, 0] < height) & (votes[:, 1] < width))
        accumulator[votes[valid, 0], votes[valid, 1]] += 1
    return accumulator
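
A quick check of the accumulator on a synthetic frame; the 64x64 size, centre, and radius are arbitrary test values:

import numpy as np

frame = np.zeros((64, 64))
theta = np.radians(np.arange(360))
frame[(32 + 10 * np.sin(theta)).astype(int), (32 + 10 * np.cos(theta)).astype(int)] = 255

acc = circle_acc(frame, r=10)
print(np.unravel_index(acc.argmax(), acc.shape))  # the vote peak should land near the centre (32, 32)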
Example No. 8
from typing import Any, Dict, List, Optional

import numpy as np

# MixtureParameters and MT are types from the original IMM module


def mode_matched_update(
    self,
    z: np.ndarray,
    immstate: MixtureParameters[MT],
    sensor_state: Optional[Dict[str, Any]] = None,
) -> List[MT]:
    """Update each mode in immstate with z in sensor_state."""

    # Mode-matched (step 3) update: each filter updates its own mode state
    updated_state = []
    for filt, mode_state in zip(self.filters, immstate.components):
        updated_state.append(filt.update(z, mode_state, sensor_state))
    return updated_state  # a list, matching the declared List[MT] return type
Example No. 9
import numpy as np


def KNN_Classifier(instance, dat, label, k, measure):
    """ KNN using cluster labels"""

    if measure == "euclidian":
        dists = np.sqrt(((dat - instance)**2).sum(axis=1))
    elif measure == "cosine":
        dat_norm = np.array([np.linalg.norm(dat[j]) for j in range(len(dat))])
        instance_norm = np.linalg.norm(instance)
        sims = np.dot(dat, instance) / (dat_norm * instance_norm)
        dists = 1 - sims
    idx = np.argsort(dists)
    neighbor_index = idx[:k]
    neighbor_record = dat[neighbor_index]
    labels = label[neighbor_index]
    final_class = np.bincount(labels)
    return np.argmax(final_class), idx[:k]
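
A small usage sketch with two synthetic clusters; the data and k are illustrative, and note that the function expects the measure spelled 'euclidian':

import numpy as np

rng = np.random.default_rng(0)
dat = np.vstack([rng.normal(0, 1, (20, 2)), rng.normal(5, 1, (20, 2))])
label = np.array([0] * 20 + [1] * 20)
instance = np.array([4.5, 5.2])

pred, neighbors = KNN_Classifier(instance, dat, label, k=5, measure="euclidian")
print(pred)  # expected: 1, the cluster centred at (5, 5)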
Example No. 10
    def __call__(
        self,
        replay_buffer: ReplayBuffer,
        obs: Any,
        action: Any,
        reward: float,
        terminal: bool,
        log_prob: float,
    ):
        user = obs["user"]

        kwargs = {}

        if self.box_keys or self.discrete_keys:
            doc_obs = obs["doc"]
            for k in self.box_keys:
                kwargs["doc_{k}"] = np.vstack([v[k] for v in doc_obs.values()])
            for k in self.discrete_keys:
                kwargs["doc_{k}"] = np.array([v[k] for v in doc_obs.values()])
        else:
            kwargs["doc"] = obs["doc"]

        # Responses

        for k in self.response_box_keys:
            kwargs["response_{k}"] = np.vstack([v[k] for v in obs["response"]])
        for k in self.response_discrete_keys:
            kwargs["response_{k}"] = np.arrray([v[k] for v in obs["response"]])

        replay_buffer.add(
            observation=user,
            action=action,
            reward=reward,
            terminal=terminal,
            log_prob=log_prob,
            **kwargs,
        )
Example No. 11
import numpy as np


def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
    """
    Find subpixel approximation of the correlation peak.
    
    This function returns a sub-pixel approximation of the position of
    the correlation peak using one of several available methods.
    
    Parameters
    ----------
    corr : np.ndarray
        the correlation map.
        
    subpixel_method : string
         one of the following methods to estimate subpixel location of the peak: 
         'centroid' [replaces default if correlation map is negative], 
         'gaussian' [default if correlation map is positive], 
         'parabolic'.
         
    Returns
    -------
    subp_peak_position : two elements tuple
        the fractional row and column indices for the sub-pixel
        approximation of the correlation peak.
    """

    # initialization
    default_peak_position = (corr.shape[0] / 2, corr.shape[1] / 2)

    # the peak locations
    peak1_i, peak1_j, dummy = find_first_peak(corr)

    try:
        # the peak and its neighbours: left, right, down, up
        c = corr[peak1_i, peak1_j]
        cl = corr[peak1_i - 1, peak1_j]
        cr = corr[peak1_i + 1, peak1_j]
        cd = corr[peak1_i, peak1_j - 1]
        cu = corr[peak1_i, peak1_j + 1]

        # gaussian fit
        if np.any(np.array([c, cl, cr, cd, cu]) < 0
                  ) and subpixel_method == 'gaussian':
            subpixel_method = 'centroid'

        try:
            if subpixel_method == 'centroid':
                subp_peak_position = (((peak1_i - 1) * cl + peak1_i * c +
                                       (peak1_i + 1) * cr) / (cl + c + cr),
                                      ((peak1_j - 1) * cd + peak1_j * c +
                                       (peak1_j + 1) * cu) / (cd + c + cu))

            elif subpixel_method == 'gaussian':
                subp_peak_position = (
                    peak1_i + ((np.log(cl) - np.log(cr)) /
                               (2 * np.log(cl) - 4 * np.log(c) + 2 * np.log(cr))),
                    peak1_j + ((np.log(cd) - np.log(cu)) /
                               (2 * np.log(cd) - 4 * np.log(c) + 2 * np.log(cu))))

            elif subpixel_method == 'parabolic':
                subp_peak_position = (peak1_i + (cl - cr) /
                                      (2 * cl - 4 * c + 2 * cr), peak1_j +
                                      (cd - cu) / (2 * cd - 4 * c + 2 * cu))

        except:
            subp_peak_position = default_peak_position

    except IndexError:
        subp_peak_position = default_peak_position

    return subp_peak_position[0], subp_peak_position[1]
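
find_first_peak comes from the same module (OpenPIV-style code). A minimal stand-in plus a call on a synthetic Gaussian correlation map, for which the gaussian fit recovers the off-grid peak almost exactly:

import numpy as np

def find_first_peak(corr):
    i, j = np.unravel_index(corr.argmax(), corr.shape)
    return i, j, corr[i, j]

ii, jj = np.mgrid[0:32, 0:32]
corr = np.exp(-((ii - 15.3)**2 + (jj - 16.7)**2) / 8.0)  # true peak at (15.3, 16.7)
print(find_subpixel_peak_position(corr, 'gaussian'))  # ~(15.3, 16.7)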
Example No. 12

import numpy as np
import pandas as pd
from scipy import stats



# Load the dataset
data = pd.read_csv('train_preprocessed.csv', sep=',', header=None)
data = data.values  # DataFrame.as_matrix() was removed from pandas; .values is the replacement


# Use only one feature
#data_T1_V1 = data[:, 2]
#data_T1_V1 = np.array([data_T1_V1]).T

total_data = data[:, 2:]
total_data = np.array(total_data)  # already 2-D; the [..].T column trick is only needed for 1-D slices

hazard = data[:, 1]
hazard = np.array([hazard]).T


# Split the data into training/testing sets

#data_T1_V1_train = data_T1_V1[:40000]
#data_T1_V1_test = data_T1_V1[40000:]

total_data_train = total_data[:40000]
total_data_test = total_data[40000:] 


# Split the targets into training/testing sets
hazard_train = hazard[:40000]
hazard_test = hazard[40000:]
Example No. 14
import numpy as np


def laserCallback(self, scan_data):
    ranges = np.array(scan_data.ranges)
    self.laser_dist_min = ranges.min()
    print("laser_dist_min is {}".format(self.laser_dist_min))
Example No. 15
import os
from os import listdir, makedirs
from os.path import isfile, join

import cv2
import numpy as np

# `path` and `dstpath` are set elsewhere in the original script
try:
    makedirs(dstpath)
except OSError:
    print("Directory already exists, images will be written in the same folder")

files = [f for f in listdir(path) if isfile(join(path, f))]

imgarr = []
for image in files:
    try:
        img = cv2.imread(os.path.join(path, image), cv2.IMREAD_GRAYSCALE)
        imgnew = cv2.equalizeHist(img)
        dstPath = join(dstpath, image)
        imgarr.append(imgnew)
        cv2.imwrite(dstPath[:-4] + '.pgm', imgnew)
    except Exception:
        print("{} is not converted".format(image))

a = np.array(imgarr)

for i in imgarr:
    b = PCA(i, dims_rescaled_data=895)  # PCA is defined elsewhere in the original script
    
    


Example No. 16

import math

import numpy as np


def world_coordinates_polar(rho, theta, robot_x, robot_y, robot_phi):
    return np.array([robot_x, robot_y]) + np.array(
        [rho * math.cos(robot_phi + theta), rho * math.sin(robot_phi + theta)])
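
A quick check of the function above: a robot at (1, 2) facing along +x that sees an obstacle 1 m straight ahead should place it at (2, 2):

print(world_coordinates_polar(rho=1.0, theta=0.0, robot_x=1.0, robot_y=2.0, robot_phi=0.0))  # [2. 2.]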
Example No. 17
import os

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon


data_out = '/home/pressions/SATELITIME/sdatats/Graph_data/'

files = os.listdir(data_out)  # List the files.
files.sort()  # Sort the files.

a = 2002
day = 185
day2 = day+7
varg = 'chl_8d'

filen = data_out+'A'+str(a)+str(format(day,'03'))+str(a)+str(format(day2,'03'))+'.'+varg+'_ZI'


f = filen + str(i) + '.npy'  # `i` is defined elsewhere in the original script
np.array([f])
data = np.load(f)
np.mean(data)

plt.plot(data, 'o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Trace test')

plt.show()
Example No. 18
import numpy as np
import cv2
import matplotlib.pyplot as plt

cap = cv2.VideoCapture(1)

while True:
    _, frame = cap.read()
    hsv1 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([0, 0, 0])
    upper_red = np.array([255, 255, 255])

    mask = cv2.inRange(hsv1, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
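
Note that the bounds above span the whole HSV space, so the mask passes every pixel. In OpenCV, hue runs 0..180, so a real red mask usually needs two bands; the thresholds below are illustrative, and the saturation/value cut-offs are a matter of tuning:

import numpy as np
import cv2

lower_red1, upper_red1 = np.array([0, 120, 70]), np.array([10, 255, 255])
lower_red2, upper_red2 = np.array([170, 120, 70]), np.array([180, 255, 255])
# combined mask: cv2.inRange(hsv1, lower_red1, upper_red1) | cv2.inRange(hsv1, lower_red2, upper_red2)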
Example No. 19
    def train(self, config):
        d_optim = tf.train.AdamOptimizer( config.learning_rate, beta1=config.beta1 ) \
                    .minimize( self.d_loss, var_list=self.d_vars )
        g_optim = tf.train.AdamOptimizer( config.learning_rate, beta1=config.beta1 ) \
                    .minimize( self.g_loss, var_list=self.g_vars )

        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            tf.initialize_all_variables().run()  # fallback for very old TensorFlow versions

        self.g_sum = ops.merge_summary([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum,
            self.g_loss_sum
        ])
        self.d_sum = ops.merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = ops.SummaryWriter('./logs', self.sess.graph)

        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))

        if config.dataset == 'mnist':
            sample_inputs = self.data_X[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]
            sample = [
                utils.get_image(sample_file,
                                input_height=self.input_height,
                                input_width=self.input_width,
                                resize_height=self.output_height,
                                resize_width=self.output_width,
                                crop=self.crop,
                                grayscale=self.grayscale)
                for sample_file in sample_files
            ]
            if (self.grayscale):
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :,
                                                                    None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print('[*] Load SUCCESS')
        else:
            print('[*] Load failed...')

        for epoch in range(config.epoch):
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_X),
                                 config.train_size) // config.batch_size
            else:
                self.data = glob(
                    os.path.join('./data', config.dataset,
                                 self.input_fname_pattern))
                batch_idxs = min(len(self.data),
                                 config.train_size) // config.batch_size

            for idx in range(0, batch_idxs):
                if config.dataset == 'mnist':
                    batch_images = self.data_X[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    batch_labels = self.data_y[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]
                    batch = [
                        utils.get_image(batch_file,
                                        input_height=self.input_height,
                                        input_width=self.input_width,
                                        resize_height=self.output_height,
                                        resize_width=self.output_width,
                                        crop=self.crop,
                                        grayscale=self.grayscale)
                        for batch_file in batch_files
                    ]

                    if self.grayscale:
                        batch_images = np.array(batch).astype(
                            np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)

                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

                if config.dataset == 'mnist':
                    # Update D network
                    _, summary_str = self.sess.run(
                        [d_optim, self.d_sum],
                        feed_dict={
                            self.inputs: batch_images,
                            self.z: batch_z,
                            self.y: batch_labels
                        })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })

                    errD_real = self.d_loss_real.eval({
                        self.inputs: batch_images,
                        self.y: batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                else:
                    # Update D network
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={
                                                       self.inputs:
                                                       batch_images,
                                                       self.z: batch_z
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                    errD_real = self.d_loss_real.eval(
                        {self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_z})

                counter += 1
                print('Epoch: [%2d]  [%4d/%4d] time: %4.4f, d_loss:%.8f, g_loss:%.8f' \
                      % (epoch, idx, batch_idxs,
                         time.time() - start_time, errD_fake + errD_real, errG
                         )
                      )
                if np.mod(counter, 100) == 1:
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                                self.y: sample_labels,
                            })
                        utils.save_images(
                            samples,
                            utils.image_manifold_size(samples.shape[0]),
                            './{}/train_{:02d}_{:04d}.png'.format(
                                config.sample_dir, epoch, idx))
                        print('[Sample] d_loss: %.8f g_loss: %.8f' %
                              (d_loss, g_loss))
                    else:
                        try:
                            samples, d_loss, g_loss = self.sess.run(
                                [self.sampler, self.d_loss, self.g_loss],
                                feed_dict={
                                    self.z: sample_z,
                                    self.inputs: sample_inputs,
                                })
                            utils.save_images(
                                samples,
                                utils.image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(
                                    config.sample_dir, epoch, idx))
                            print('[Sample] d_loss: %.8f, g_loss: %.8f' %
                                  (d_loss, g_loss))
                        except:
                            print('One pic error!...')

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)