Example No. 1
 def normalizeh2(self,i,n,exclu):
     #normalize ith player excluding the exclu card
     if i!=4:
         #needs to preserve ones
         temp=np.copy(self.Prob[i])
         exclulist=[0.0 for j in range(54)]
         exclulist[exclu]=self.Prob[i,exclu] #excluded card
         exclulist=exclulist+(temp==1) #where the ones are=(temp==1)
         woutones=temp-(exclulist) #ones are eliminated
         if sum(woutones)!=0:
             self.Prob[i]=(n-sum(exclulist))/sum(woutones)*woutones+exclulist #the ones are not touched
         elif n-sum(exclulist)!=0:
             raise ValueError
         #the rest are normalized
     else:
         #needs to preserve ones
         temp=np.copy(self.Prob[i])
         exclulist=[0.0 for j in range(54)]
         exclulist[exclu]=self.Prob[i,exclu] #excluded card
         exclulist=exclulist+(temp==1) #where the ones are=(temp==1)
         woutones=temp-(exclulist) #ones are eliminated
         if sum(woutones)!=0:
            self.Prob[i]=(6.0-sum(exclulist))/sum(woutones)*woutones+exclulist
         elif n-sum(exclulist)!=0:
             raise ValueError
     if max(self.Prob[i])>1:
         self.Prob[i]=[min(P,1.0) for P in self.Prob[i]]
         self.normalizeh2(i,n,exclu)
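The method above rescales only the "free" entries of a probability row so that it sums to the n cards in the hand, leaving certainties (entries equal to 1) and the excluded card untouched. A minimal standalone sketch of that rescaling idea (a hypothetical helper, not part of the original class):

import numpy as np

def renormalize(row, n, exclu):
    # Hypothetical helper illustrating the rescaling used by normalizeh2.
    row = np.array(row, dtype=float)
    fixed = (row == 1.0)            # certainties stay at 1
    free = ~fixed
    free[exclu] = False             # the excluded card also stays fixed
    fixed_mass = row[~free].sum()   # mass that must not be rescaled
    free_mass = row[free].sum()
    if free_mass > 0:
        row[free] *= (n - fixed_mass) / free_mass
    return row

print(renormalize([0.2, 0.3, 1.0, 0.5], n=2, exclu=0).sum())  # 2.0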
Example No. 2
    def test_grad_log_likelihood_pings(self):
        """Ping test (compare analytic result to finite difference) the log likelihood gradient wrt hyperparameters."""
        numpy.random.seed(2014)
        h = 2.0e-4
        tolerance = 5.0e-6

        for num_sampled in self.num_sampled_list:
            self.gp_test_environment_input.num_sampled = num_sampled
            _, gaussian_process = self._build_gaussian_process_test_data(self.gp_test_environment_input)
            python_cov, historical_data = gaussian_process.get_core_data_copy()

            lml = GaussianProcessLogMarginalLikelihood(python_cov, historical_data)

            analytic_grad = lml.compute_grad_log_likelihood()
            for k in xrange(lml.num_hyperparameters):
                hyperparameters_old = lml.hyperparameters

                # hyperparameter + h
                hyperparameters_p = numpy.copy(hyperparameters_old)
                hyperparameters_p[k] += h
                lml.hyperparameters = hyperparameters_p
                cov_p = lml.compute_log_likelihood()
                lml.hyperparameters = hyperparameters_old

                # hyperparameter - h
                hyperparameters_m = numpy.copy(hyperparameters_old)
                hyperparameters_m[k] -= h
                lml.hyperparameters = hyperparameters_m
                cov_m = lml.compute_log_likelihood()
                lml.hyperparameters = hyperparameters_old

                # calculate finite diff
                fd_grad = (cov_p - cov_m) / (2.0 * h)

                self.assert_scalar_within_relative(fd_grad, analytic_grad[k], tolerance)
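The same central-difference "ping" pattern works for any differentiable scalar objective; a minimal self-contained sketch with a hypothetical objective f (not from the source):

import numpy

def f(theta):
    return numpy.sum(theta ** 3)

theta = numpy.array([1.0, 2.0])
analytic = 3.0 * theta ** 2       # d/d_theta_k of sum(theta^3)
h = 2.0e-4
for k in range(theta.size):
    theta_p = numpy.copy(theta); theta_p[k] += h
    theta_m = numpy.copy(theta); theta_m[k] -= h
    fd = (f(theta_p) - f(theta_m)) / (2.0 * h)
    assert abs(fd - analytic[k]) / abs(analytic[k]) < 5.0e-6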
def test_connected(grid, start, tolerance=1):
    mark = np.in1d(grid, [
                        color_code_map_inv['w'], 
                        color_code_map_inv['g']
                    ]
            ).reshape(grid.shape)
    frontier = [start]
    mark[tuple(start)] = True
    mark_cnt = 1
    needed_mark_count = math.floor((mark.size - np.count_nonzero(mark)) * tolerance)
    while len(frontier) > 0:
        loc = frontier.pop()
        for i in range(len(grid.shape)):
            l = np.copy(loc)
            l[i] = loc[i] - 1
            if l[i] >= 0 and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
            l[i] = loc[i] + 1
            if l[i] < grid.shape[i] and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
        if mark_cnt >= needed_mark_count:
            return True
    return False
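A hypothetical usage sketch; test_connected reads the global color_code_map_inv, so the 'w'/'g' codes below are invented purely for illustration:

import math
import numpy as np

color_code_map_inv = {'w': 0, 'g': 1}   # assumed color -> code mapping
grid = np.array([[0, 0, 2],
                 [2, 0, 0],
                 [2, 2, 1]])
print(test_connected(grid, start=np.array([0, 0])))  # True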
def fmm_single_wall_stokeslet(r_vectors, force, eta, a, *args, **kwargs):
  '''
  WARNING: pseudo-PBC are not implemented for this function.

  Compute the Stokeslet interaction plus self mobility
  II/(6*pi*eta*a) in the presence of a wall at z=0.
  It uses the fmm implemented in the library stfmm3d.
  Must compile mobility_fmm.f90 before this will work
  (see Makefile).

  For blobs overlapping the wall we compute
  M = B^T * M_tilde(z_effective) * B.
  '''
  # Get effective height
  r_vectors_effective = shift_heights(r_vectors, a)
  # Compute damping matrix B
  B, overlap = damping_matrix_B(r_vectors, a, *args, **kwargs)
  # Compute B * force
  if overlap is True:
    force = B.dot(force)
  # Compute M_tilde * B * vector
  num_particles = r_vectors.size // 3
  ier = 0
  iprec = 5
  r_vectors_fortran = np.copy(r_vectors_effective.T, order='F')
  force_fortran = np.copy(np.reshape(force, (num_particles, 3)).T, order='F')
  u_fortran = np.empty_like(r_vectors_fortran, order='F')
  fmm.fmm_stokeslet_half(r_vectors_fortran, force_fortran, u_fortran, ier, iprec, a, eta, num_particles)
  # Compute B.T * M * B * force
  if overlap is True:
    return B.dot(np.reshape(u_fortran.T, u_fortran.size))
  else:
    return np.reshape(u_fortran.T, u_fortran.size)
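The overlap correction is the damping "sandwich" M = B^T * M_tilde(z_effective) * B; a dense toy illustration of that product (matrices invented here, while the real code applies B and the FMM blob-wise):

import numpy as np

n = 3 * 4                                     # 4 blobs, 3 components each (assumed)
B = np.diag(np.random.uniform(0.0, 1.0, n))   # hypothetical damping factors
M_tilde = np.eye(n)                           # stand-in for wall-corrected mobility
velocity = B.T @ (M_tilde @ (B @ np.ones(n)))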
def replica_exchange(index_spin, index_replica, beta, d_beta,X = [[]] ):
    x1, x2 = X[index_replica, :], X[index_replica + 1, :]
    # r = P(x_k | beta_k+1)P(x_k+1 | beta_k) / P(x_k | beta_k)P(x_k+1 | beta_k+1)
    r = beta_power_of_prob(index_spin, beta + d_beta, x1, theta) * beta_power_of_prob(index_spin, beta, x2, theta) / (beta_power_of_prob(index_spin, beta, x1, theta) * beta_power_of_prob(index_spin, beta + d_beta, x2, theta))
    if(np.random.uniform(size=1) < r):
        X[index_replica, :], X[index_replica + 1, :] = np.copy(x2), np.copy(x1)
    return X
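With the precedence fixed, the ratio matches the comment. A generic standalone version of the swap test, with a hypothetical log-density (working in log space sidesteps both the precedence pitfall and overflow):

import numpy as np

def swap_accepted(log_p, x1, x2, beta, d_beta, rng=np.random):
    # log r = (beta+d_beta)log p(x1) + beta log p(x2) - beta log p(x1) - (beta+d_beta)log p(x2)
    log_r = (beta + d_beta) * log_p(x1) + beta * log_p(x2) \
            - beta * log_p(x1) - (beta + d_beta) * log_p(x2)
    return np.log(rng.uniform()) < log_r

log_p = lambda x: -0.5 * np.sum(x ** 2)   # assumed target: standard Gaussian
print(swap_accepted(log_p, np.ones(3), np.zeros(3), beta=1.0, d_beta=0.5))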
Example No. 6
def add_letters_to_axis(ax, letter_heights):
    """
    Plots letter on user-specified axis.

    Parameters
    ----------
    ax : axis
    letter_heights: Nx4 array
    """
    assert letter_heights.shape[1] == 4

    x_range = [1, letter_heights.shape[0]]
    pos_heights = np.copy(letter_heights)
    pos_heights[letter_heights < 0] = 0
    neg_heights = np.copy(letter_heights)
    neg_heights[letter_heights > 0] = 0

    for x_pos, heights in enumerate(letter_heights):
        letters_and_heights = sorted(zip(heights, 'ACGT'))
        y_pos_pos = 0.0
        y_neg_pos = 0.0
        for height, letter in letters_and_heights:
            if height > 0:
                add_letter_to_axis(ax, letter, 0.5 + x_pos, y_pos_pos, height)
                y_pos_pos += height
            else:
                add_letter_to_axis(ax, letter, 0.5 + x_pos, y_neg_pos, height)
                y_neg_pos += height

    ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
    ax.set_xticks(range(*x_range) + [x_range[-1]])
    ax.set_aspect(aspect='auto', adjustable='box')
    ax.autoscale_view()
 def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
     if reuse:
         tf.get_variable_scope().reuse_variables()
     
     self.fc_val_params = np.copy(self.fc_joint_params)
     self.fc_val_params['out_dims'][-1] = self.target_dim
     self.fc_adv_params = np.copy(self.fc_joint_params)
     self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
     p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
     p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
     p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
     if isinstance(self.fc_obj_params, np.ndarray):
         p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
         p_concat_fc = tf.concat([p_img_fc,p_meas_fc,p_obj_fc], 1)
     else:
         p_concat_fc = tf.concat([p_img_fc,p_meas_fc], 1)
         if self.random_objective_coeffs:
             raise Exception('Need fc_obj_params with randomized objectives')
         
     p_val_fc = my_ops.fc_net(p_concat_fc, self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
     p_adv_fc = my_ops.fc_net(p_concat_fc, self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
     
     adv_reshape = tf.reshape(p_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
     
     pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
     pred_all = pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
     pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))
     
     return pred_all, pred_relevant
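The value/advantage recombination at the end is the dueling pattern: subtract the mean advantage over actions, then add the value stream back. A numpy sketch of just that step (shapes invented):

import numpy as np

batch, n_actions, target_dim = 2, 3, 4
adv = np.random.randn(batch, n_actions, target_dim)   # advantage stream
val = np.random.randn(batch, 1, target_dim)           # value stream
pred_all = adv - adv.mean(axis=1, keepdims=True) + val
assert pred_all.shape == (batch, n_actions, target_dim)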
Example No. 8
    def collapse_shape(self, collapse):
        """Collapses the table along the axes given.

        Parameters
        ----------
        collapse : A sequence of booleans of length dims.  If an entry is set to True, 
            then this dimension is summed over.  For a 2x2 table, collapse=(False, True)
            would sum over the second index, leaving the binning structure of the first.

        Returns
        -------
        A contingency table with dimensions equal to all non-collapsed indices.
        """
        if len(collapse) != self.dims:
            raise RuntimeError("Collapse mask must have length equal to number of table dimensions.")

        # Sum table over indices in collapse
        observed = np.copy(self.observed)
        expected = np.copy(self.expected)
        for d in range(self.dims-1, -1, -1):
            if collapse[d]:
                observed = observed.sum(d)
                expected = expected.sum(d)

        # Regenerate parameters
        if self.bins is None:
            bins = None
        else:
            bins = []
            for d in range(self.dims):
                if not collapse[d]:
                    bins.append(self.bins[d])
            bins = tuple(bins)
        
        return ContingencyTable(observed, expected, bins)
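A quick demo of the collapse loop on a toy 2x2 table; summing from the highest axis down keeps the remaining axis indices valid:

import numpy as np

observed = np.arange(4).reshape(2, 2)   # [[0, 1], [2, 3]]
collapse = (False, True)                # sum over the second index
for d in range(observed.ndim - 1, -1, -1):
    if collapse[d]:
        observed = observed.sum(d)
print(observed)  # [1 5]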
Example No. 9
def copyCase(case):
    ppc = {"version": 2}
    ppc["baseMVA"] = 100.0
    ppc["bus"] = copy(case["bus"])
    ppc["gen"] = copy(case["gen"])
    ppc["branch"] = copy(case["branch"])
    return ppc
Example No. 10
def _compute_positions(spin, seed_pos, seed_spin):
    """Given an array of SPINS and two SEED_POSITION and SEED_SPIN values, compute the positions along the prime hex 

    """

    logger.info("compute_positions: starting aux calculations")
    delta = np.zeros_like(spin)
    delta[0] = spin[0] - seed_spin  # first delta is cur_spin - prev_spin from seed_spin
    delta[1:] = spin[1:] - spin[:-1]  # delta is cur_spin - prev_spin
    logger.info("compute_positions: delta={}".format(delta))

    increments = np.copy(spin)  # copy the spin array,
    increments[delta != 0] = 0  # zero the increment wherever the spin changed (delta != 0)
    logger.info("compute_positions: increments={}".format(increments))

    logger.info("compute_positions:\tdone with aux calculations")

    logger.info("compute_positions: starting primary calculation")

    # start at seed, cumulative add
    positions = np.copy(increments)
    # increments[0] = seed_pos
    outpositions = (seed_pos + np.cumsum(increments)) % 6
    logger.info("compute_positions: outpositions={}".format(outpositions))
    logger.info("compute_positions:\tdone with primary calculation")
    return outpositions
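A toy check of the cumulative rule at the end (positions advance by the increments, modulo the six hex positions):

import numpy as np

increments = np.array([1, 0, 2, 1])
seed_pos = 4
print((seed_pos + np.cumsum(increments)) % 6)  # [5 5 1 2]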
Example No. 11
 def clear(self):
     self.new_x = copy(self.x)
     self.new_y = copy(self.y)
     args = ()
     for i in range(len(self.x)):
         args = args + (self.new_x[i], self.new_y[i])
     self.plot(*args)
Example No. 12
def cap(guess_vector):
    """
    This takes the Euler equations and sets them equal to zero for an fsolve.
    Remember that keq was found by taking the derivative of the sum of the
        utility functions with respect to k in each time period, and that
        leq was found the same way, but because l only shows up in one period,
        it has a much smaller term.

    ### Parameters ###
    guess_vector: The first half is the initial guess for the capital, and
        the second half is the initial guess for the labor
    """
    #equations for keq
    ks = np.zeros(periods)
    ks[1:] = guess_vector[:periods-1]
    ls  = guess_vector[periods-1:]
    kk  = ks[:-1]
    kk1 = ks[1:]
    kk2 = np.zeros(periods-1)
    kk2[:-1] = ks[2:]
    lk  = ls[:-1]
    lk1 = ls[1:]
    #equation for leq
    ll = np.copy(ls)
    kl = np.copy(ks)
    kl1 = np.zeros(periods)
    kl1[:-1] = kl[1:]
    w = wage(ks, ls)
    r = rate(ks, ls)
    keq = ((lk*w+(1.+r-delta)*kk - kk1)**-gamma - (beta*(1+r-delta)*(lk1*w+(1+r-delta)*kk1-kk2)**-gamma))
    leq = ((w*(ll*w + (1+r-delta)*kl-kl1)**-gamma)-(1-ll)**-sigma)
    error = np.append(keq, leq)

    return error
Example No. 13
def lossFun(inputs, targets, hprev):
  """
  inputs,targets are both list of integers.
  hprev is Hx1 array of initial hidden state
  returns the loss, gradients on model parameters, and last hidden state
  """
  xs, hs, ys, ps = {}, {}, {}, {}
  hs[-1] = np.copy(hprev)
  loss = 0
  # forward pass
  for t in xrange(len(inputs)):
    xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
    xs[t][inputs[t]] = 1
    hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
    ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
    ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
    loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
  # backward pass: compute gradients going backwards
  dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
  dbh, dby = np.zeros_like(bh), np.zeros_like(by)
  dhnext = np.zeros_like(hs[0])
  for t in reversed(xrange(len(inputs))):
    dy = np.copy(ps[t])
    dy[targets[t]] -= 1 # backprop into y
    dWhy += np.dot(dy, hs[t].T)
    dby += dy
    dh = np.dot(Why.T, dy) + dhnext # backprop into h
    dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
    dbh += dhraw
    dWxh += np.dot(dhraw, xs[t].T)
    dWhh += np.dot(dhraw, hs[t-1].T)
    dhnext = np.dot(Whh.T, dhraw)
  for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
    np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
  return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
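For context, this loss is normally driven by a loop like the following: a hedged sketch of the standard min-char-rnn driver with an Adagrad update, assuming the usual globals (data, char_to_ix, vocab_size, hidden_size and the weight matrices) are initialized elsewhere:

n, p, seq_length, learning_rate = 0, 0, 25, 1e-1
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by)
while n < 1000:
  if p + seq_length + 1 >= len(data) or n == 0:
    hprev = np.zeros((hidden_size, 1))  # reset RNN memory
    p = 0
  inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
  targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
  loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
  for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                [dWxh, dWhh, dWhy, dbh, dby],
                                [mWxh, mWhh, mWhy, mbh, mby]):
    mem += dparam * dparam
    param += -learning_rate * dparam / np.sqrt(mem + 1e-8)  # Adagrad update
  p += seq_length
  n += 1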
 def get_unidirectional_S(self):
     S_plus = np.copy(self.S)
     S_minus = np.copy(self.S)
     S_plus[self.S < 0] = 0
     S_minus[self.S > 0] = 0
     return S_minus, S_plus
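A minimal demo of the positive/negative split above on a toy 1-D S:

import numpy as np

S = np.array([3., -2., 0., 5.])
S_plus, S_minus = np.copy(S), np.copy(S)
S_plus[S < 0] = 0    # keep only the positive entries
S_minus[S > 0] = 0   # keep only the negative entries
print(S_minus, S_plus)  # [ 0. -2.  0.  0.] [3. 0. 0. 5.]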
     
Example No. 15
    def make_corners(self, f):
        """
        The standard mom grid includes t-cell corners by specifying the u, v
        grid. Here we extract that and put it into the format expected by
        the regridder and OASIS.
        """

        x = np.copy(f.variables['x'])
        y = np.copy(f.variables['y'])

        self.clon = np.zeros((4, x.shape[0] / 2, x.shape[1] / 2))
        self.clon[:] = np.NAN
        self.clat = np.zeros((4, x.shape[0] / 2, x.shape[1] / 2))
        self.clat[:] = np.NAN

        # Corner lats. 0 is bottom left and then counter-clockwise. 
        # This is the OASIS convention. 
        self.clat[0,:,:] = y[0:-1:2,0:-1:2]
        self.clat[1,:,:] = y[0:-1:2,2::2]
        self.clat[2,:,:] = y[2::2,2::2]
        self.clat[3,:,:] = y[2::2,0:-1:2]

        # Corner lons.
        self.clon[0,:,:] = x[0:-1:2,0:-1:2]
        self.clon[1,:,:] = x[0:-1:2,2::2]
        self.clon[2,:,:] = x[2::2,2::2]
        self.clon[3,:,:] = x[2::2,0:-1:2]

        # Select points from the double density grid. Southernmost U points are
        # excluded, as are the last (Eastern) U points, which are duplicates of
        # the first.
        self.x_t = x[1::2,1::2]
        self.y_t = y[1::2,1::2]
        self.x_u = x[2::2,0:-1:2]
        self.y_u = y[2::2,0:-1:2]
Example No. 16
def crop_data(bg, overlay):
    '''
    Crop the data to get rid of large amounts of black space surrounding the
    background image.
    '''
    #---------------------------------------------------------------
    # First find all the slices that contain data you want
    slices_list_x = list(np.argwhere(np.sum(bg, (1,2)) != 0)[:,0])
    slices_list_y = list(np.argwhere(np.sum(bg, (0,2)) != 0)[:,0])
    slices_list_z = list(np.argwhere(np.sum(bg, (0,1)) != 0)[:,0])

    slices_list = [slices_list_x, slices_list_y, slices_list_z]
    
    #---------------------------------------------------------------
    # Make a copy of the data
    bg_cropped = np.copy(bg)
    overlay_cropped = np.copy(overlay)
    
    #---------------------------------------------------------------
    # Remove all slices that have no data in the background image
    bg_cropped = bg_cropped[ slices_list_x, :, : ]
    overlay_cropped = overlay_cropped[ slices_list_x, :, : ]
    
    bg_cropped = bg_cropped[ :, slices_list_y, : ]
    overlay_cropped = overlay_cropped[ :, slices_list_y, : ]
    
    bg_cropped = bg_cropped[ :, :, slices_list_z ]
    overlay_cropped = overlay_cropped[ :, :, slices_list_z ]
        
    return bg_cropped, overlay_cropped, slices_list
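A quick demo of the nonzero-slice selection on a toy volume:

import numpy as np

bg = np.zeros((4, 4, 4))
bg[1:3, 1:3, 1:3] = 1.0
slices_list_x = list(np.argwhere(np.sum(bg, (1, 2)) != 0)[:, 0])
print([int(i) for i in slices_list_x])  # [1, 2]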
Example No. 17
    def getCurrentSpectrum(self):
        # self.c_rfi.execute("SELECT spectrum, timestamp from spectra_%i where timestamp = (SELECT max(timestamp) from spectra_%i)"%(self.which_db, self.which_db))
        # result = self.c_rfi.fetchone()
        # self.c_rfi.execute("FLUSH QUERY CACHE")

        data = numpy.copy(self.curr)

        result = [cnf.remove_internal_RFI(data,self.mode[0]), self.time[0], self.mode[0]]

        # print "current timestamp = %i"%self.time[0]

        while self.last_timestamp == result[1]: #Current timestamp equals last timestamp
            data = numpy.copy(self.curr)
            result = [cnf.remove_internal_RFI(data,self.mode[0]), self.time[0], self.mode[0]]
            time.sleep(0.1)
            # res = self.c_rfi.fetchall()
            # if res[0][0] != self.which_db: #Check if using the correct DB
            #     self.which_db = res[0][0]
            # while self.last_timestamp == result[1]:
            #     self.c_rfi.execute("SELECT spectrum, timestamp from spectra_%i where timestamp = (SELECT max(timestamp) from spectra_%i)"%(self.which_db, self.which_db))
            #     result = self.c_rfi.fetchone()
            #     self.c_rfi.execute("FLUSH QUERY CACHE")
            #     time.sleep(0.1)
        self.last_timestamp = result[1]
        return (result[0], result[1], result[2] - 1)
Example No. 18
    def test_step_back(self):
        centroids = [[5.2, 3.1], [6.5, 3], [7, 4]]
        self.kmeans.add_centroids(centroids)

        # check if nothing happens when step = 0
        centroids_before = np.copy(self.kmeans.centroids)
        clusters_before = np.copy(self.kmeans.clusters)
        self.kmeans.step_back()
        np.testing.assert_equal(centroids_before, self.kmeans.centroids)
        np.testing.assert_equal(clusters_before, self.kmeans.clusters)

        # check if centroids remain in even step
        self.kmeans.step()
        self.kmeans.step()

        centroids_before = self.kmeans.centroids
        self.kmeans.step_back()
        np.testing.assert_equal(centroids_before, self.kmeans.centroids)
        self.assertEqual(self.kmeans.step_completed, False)
        self.assertEqual(self.kmeans.centroids_moved, False)

        # check if clusters remain in even step
        clusters_before = self.kmeans.clusters
        self.kmeans.step_back()
        np.testing.assert_equal(clusters_before, self.kmeans.clusters)
        self.assertEqual(self.kmeans.step_completed, True)
        self.assertEqual(self.kmeans.centroids_moved, True)
Example No. 19
    def simul(self, nbIte, epsilon, pi0):
        """
        Function that, at each time step, computes πt
        from π0, given a limit on the number of iterations
        and, as always, a threshold epsilon
        """
        self.res = np.copy(pi0)
        temp = np.copy(pi0)

        def abs_mat(matrice):
            """
            Function that replaces every entry of a matrix with its absolute value
            """
            for i in range(len(matrice)):
                for j in range(len(matrice[0])):
                    matrice[i][j] = abs(matrice[i][j])
            return matrice

        for i in range(nbIte):
            temp = np.copy(self.res)

            self.res = self.graph.nextStep(np.matrix(self.res))
            """
            still to do:
            -> compute epsilon (matrix power difference?)
            -> check the epsilon condition
            -> plot the epsilon curve
            """
            diff = abs_mat(temp - self.res)
            eps = np.matrix.max(np.matrix(diff))

            self.liste_epsilon.append(eps)
            if eps < epsilon:
                break
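The hand-rolled abs_mat can be replaced by numpy's vectorized absolute value; an equivalent epsilon computation on toy arrays:

import numpy as np

temp = np.array([[1.0, 4.0]])
res = np.array([[1.5, 3.0]])
eps = np.abs(temp - res).max()
print(eps)  # 1.0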
Example No. 20
def build_models2(X):
	mu_init = [-13, -4, 50]
	sigmasq_init = [5, 14, 30] 
	wt_init = [0.2, 0.4, 0.4]
	its = 20

	L = []
	mu = np.copy(mu_init)
	sigmasq = np.copy(sigmasq_init)
	wt = np.copy(wt_init)

	#first iteration
	result = gmmest(X, mu_init, sigmasq_init, wt_init, its)
	mu = np.array(result[0][:])
	sigmasq = np.array(result[1][:])
	wt = np.array(result[2][:])
	L.append(result[3])

	#rest of iterations
	for i in range(its-1):
		result = gmmest(X, mu, sigmasq, wt, 1)
		mu = np.array(result[0][:])
		sigmasq = np.array(result[1][:])
		wt = np.array(result[2][:])
		L.append(result[3])

	
	return result, L
Example No. 21
def build_models1(X):
	#by observing the hist of data
	mu_init = np.array([7, 25])
	sigmasq_init = np.array([5, 3])
	wt_init = np.array([0.7, 0.3])
	its = 20

	print("initial L")
	print(result_prob(X, mu_init, sigmasq_init, wt_init))

	L = []
	mu = np.copy(mu_init)
	sigmasq = np.copy(sigmasq_init)
	wt = np.copy(wt_init)
	
	#first iteration
	result = gmmest(X, mu_init, sigmasq_init, wt_init, its)
	mu = np.array(result[0][:])
	sigmasq = np.array(result[1][:])
	wt = np.array(result[2][:])
	L.append(result[3])

	#print(result[3])

	#rest of iterations
	for i in range(its-1):
		result = gmmest(X, mu, sigmasq, wt, 1)
		mu = np.array(result[0][:])
		sigmasq = np.array(result[1][:])
		wt = np.array(result[2][:])
		L.append(result[3])
		#print(result[3])

	#print(L)
	return result, L
Example No. 22
    def cluster(self, data, n_clusters):

        n, d = shape(data)
        locations = zeros((self.n_particles, n_clusters, d))

        for i in range(self.n_particles):
            for j in range(n_clusters):
                locations[i, j, :] = copy(data[randint(n), :])  # Initialize cluster centers to random datapoints

        bestlocations = copy(locations)
        velocities = zeros((self.n_particles, n_clusters, d))

        bestscores = [score(data, centroids=locations[i, :, :], norm=self.norm) for i in range(self.n_particles)]
        sbestlocation = copy(locations[argmin(bestscores), :, :])
        sbestscore = min(bestscores)

        for i in range(self.n_iterations):
            if i % self.printfreq == 0:
                print "Particle swarm iteration", i, "best score:", sbestscore
            for j in range(self.n_particles):
                r = rand(n_clusters, d)
                s = rand(n_clusters, d)
                velocities[j, :, :] = (self.w * velocities[j, :, :]) + \
                                      (self.c1 * r * (bestlocations[j, :, :] - locations[j, :, :])) + \
                                      (self.c2 * s * (sbestlocation - locations[j, :, :]))
                locations[j, :, :] = locations[j, :, :] + velocities[j, :, :]
                currentscore = score(data, centroids=locations[j, :, :], norm=self.norm)
                if currentscore < bestscores[j]:
                    bestscores[j] = currentscore
                    bestlocations[j, :, :] = locations[j, :, :]
                    if currentscore < sbestscore:
                        sbestscore = currentscore
                        sbestlocation = copy(locations[j, :, :])

        return getlabels(data, centroids=sbestlocation, norm=self.norm)
Example No. 23
def classify(image, hog, rho, max_detected=8):
    image_boxes = np.copy(image)
    found = hog.detect(image_boxes, winStride=(1, 1))

    if len(found[0]) == 0:
        return "female", image_boxes, 0

    scores = np.zeros(found[1].shape[0])
    for index, score in enumerate(found[1]):
        scores[index] = found[1][index][0]
    order = np.argsort(scores)

    image_boxes = np.copy(image)
    index = 0
    while index < max_detected and found[1][order[index]] - rho < 0:
        current = found[0][order[index], :]
        x, y = current
        h = hog.compute(image[y : (y + win_height), x : (x + win_width), :])
        colour = (0, 255, 0)
        cv2.rectangle(image_boxes, (x, y), (x + win_width, y + win_height), colour, 1)
        index += 1
        # print 'Number of detected objects = %d' % index

    return (
        "male" if index > 0 else "female",
        image_boxes,
        index,
        found[0][order[(index - 1) : index], :],
        found[1][order[(index - 1) : index]],
    )
Example No. 24
def Solver2(A,m,n,disc,Sol,score):
    "Burst the component with maximum bubbles"
    score=0
    Acopy=np.copy(A) # copy of A, so that changes to A aren't reflected outside (copy by value)
    #I=0
    while(1):
        #I=I+1
        #print str(I)+"th iteration"
        components=[]
        disc[:]=0
        for i in xrange(m):
            for j in xrange(n):
                if Acopy[i][j]!=-1:# an unburst bubble
                    l=bfs(Acopy,i,j,disc,m,n)
                    components.append(l)
        #print "No of components=",len(components)            
        if len(components)>0:            
            maxim=len(components[0])
        else:#game over all bubble gone
            #print "Game over all finished."
            Sol.append(score)
            break    
        b=components[0]#initializing component which will be burst
        for c in components:
            if len(c)>maxim:
                maxim=len(c)
                b=c
        if maxim==1:#this means game over
            #print "Game over"
            Sol.append(score)
            break
        burst(Acopy,b,m,n)
        Mcopy=np.copy(Acopy)
        Sol.append([Mcopy,len(b)**2])
        score=score+len(b)**2            
Example No. 25
def plot_cumulative_score(smod,
                          seqs,
                          size=(6, 2),
                          fname=None):
    """plot_cumulative_score."""
    sig = cumulative_score(seqs, smod)
    plt.figure(figsize=size)
    sigp = np.copy(sig)
    sigp[sigp < 0] = 0
    plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')
    sign = np.copy(sig)
    sign[sign >= 0] = 0
    plt.bar(range(len(sign)), sign, alpha=0.3, color='r')
    plt.grid()
    plt.xlabel('Position')
    plt.ylabel('Importance score')
    if fname:
        plt.draw()
        figname = '%s_importance.png' % (fname)
        plt.savefig(
            figname, bbox_inches='tight', transparent=True, pad_inches=0)
    else:
        figname = None
        plt.show()
    plt.close()
    return figname
Example No. 26
def local_search( start_node, goal_cost=0 ):
    node = start_node
    
    while node.cost > goal_cost:
      
      while True:
         """
         Pick a random row and check if it is causing conflicts
         """
         i = random.randint( 0, start_node.gene.shape[0]-1 ) # select row to manipulate
         tmp = np.copy( node.gene )
         tmp[i,:]=0
         if objective_function(tmp)<node.cost:
            # Yep, this caused some conflicts
            break
      
      neighbors = [ (float("inf"),None) ]

      for modified in generate_permutations(node.gene[i]):
        tmp = np.copy( node.gene )
        tmp[i] = modified
        node = Board(tmp)
        
        if node.cost == neighbors[0][0]:
           neighbors.append((node.cost,node))
        elif node.cost<neighbors[0][0]:
           neighbors = [ (node.cost,node)]
           
      
      node = random.choice(neighbors)[1]
    #end while
    
    return node
Example No. 27
def Solver3(A,m,n,disc,Sol,score):
    "Burst a component at random"
    score=0
    Acopy=np.copy(A)
    while(1):
        #I=I+1
        #print str(I)+"th iteration"
        components=[]
        disc[:]=0
        for i in xrange(m):
            for j in xrange(n):
                if Acopy[i][j]!=-1:# an unburst bubble
                    l=bfs(Acopy,i,j,disc,m,n)
                    components.append(l)
        #print "No of components=",len(components)            
        if len(components)==0:            
            Sol.append(score)
            break
        breakable=[]#contains those components which can be burst    
        for c in components:
            if len(c)>1:
               breakable.append(c)
        if len(breakable)==0:
            Sol.append(score)
            break               
        b=breakable[random.randrange(0,len(breakable))]#random component which will be burst
        burst(Acopy,b,m,n)
        Mcopy=np.copy(Acopy)
        Sol.append([Mcopy,len(b)**2])
        score=score+len(b)**2       
def get_centroids(points, k):
    '''KMeans++ initialization of cluster centers
    input:  points(mat): the samples
            k(int): number of cluster centers
    output: cluster_centers(mat): the initialized cluster centers
    '''
    m, n = np.shape(points)
    cluster_centers = np.mat(np.zeros((k , n)))
    # 1. Randomly choose one sample as the first cluster center
    index = np.random.randint(0, m)
    cluster_centers[0, ] = np.copy(points[index, ])
    # 2. Initialize a list of distances
    d = [0.0 for _ in xrange(m)]
 
    for i in xrange(1, k):
        sum_all = 0
        for j in xrange(m):
            # 3. For each sample, find the distance to the nearest cluster center
            d[j] = nearest(points[j, ], cluster_centers[0:i, ])
            # 4. Sum all of the shortest distances
            sum_all += d[j]
        # 5. Draw a random value in [0, sum_all)
        sum_all *= random()
        # 6. Take the far-away sample reached at that value as the next cluster center
        for j, di in enumerate(d):
            sum_all -= di
            if sum_all > 0:
                continue
            cluster_centers[i] = np.copy(points[j, ])
            break
    return cluster_centers
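The nearest helper is external to this snippet; a plausible minimal version, assuming it returns the squared Euclidean distance to the closest existing center (the standard KMeans++ choice):

import numpy as np

def nearest(point, cluster_centers):
    # Hypothetical reconstruction: distance from `point` to its closest center.
    diffs = np.asarray(cluster_centers) - np.asarray(point)
    return float(np.min(np.sum(np.square(diffs), axis=1)))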
Example No. 29
def hun(costMatrix):

    # Check first, if costmatrix is not empty
    if costMatrix.shape==(0,0):
        return []

    # Create squared temporary matrix
    tmpMatrix = numpy.copy(costMatrix)
    tmpMatrix = makeSquareWithNegValues(tmpMatrix)
    sqCostMatrix = numpy.copy(tmpMatrix)
    sqCostMatrix[tmpMatrix==-1]=10e10

    # Solve ASP on the temporary matrix
    m=Munkres()
    i=m.compute(sqCostMatrix)


    # Create resulting matrix that contains ones at matching
    # objects and remove all excluded matches
    binMatrix = numpy.zeros( tmpMatrix.shape,dtype=bool )
    for x,y in i:
        if tmpMatrix[x,y]==-1:
            continue
        binMatrix[x,y]=True

    return binMatrix
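A usage sketch, assuming the munkres package and the makeSquareWithNegValues helper used above (for an already-square matrix the padding is a no-op):

import numpy
cost = numpy.array([[4, 1], [2, 3]])
print(hun(cost))
# [[False  True]
#  [ True False]]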
Example No. 30
 def __array__(self, dtype=None):
     if self.size:
         arrayfire.backend.get().af_get_data_ptr(ctypes.c_void_p(self.h_array.ctypes.data), self.d_array.arr)
     if dtype is None:
         return numpy.copy(self.h_array)
     else:
         return numpy.copy(self.h_array).astype(dtype)
Example No. 31
    def step(self, dt=None, subdivisions=1, retries=0, tol=1, norm=2,
             coeffs=[0.5, 0.5, 0, 0.5, 0], safety_factor=1.0, **kwargs):
        """
        Advance model forward to next timestep.

        Inputs:
        -------
        dt : float
            Timestep
        subdivisions : int
            Number of subdivisions for error estimation
        retries : int
            Number of retries if error exceeds allowed threshold
        tol : float
            Tolerance of scaled truncation error
        norm : float
            Norm to apply when computing total truncation error
        coeffs : list (length 5)
            Filter coefficients for computing adaptive step size
        safety_factor : float
            Safety factor for adaptive step size
        kwargs : **dict
            Keyword arguments passed to self.model.step
        """
        if (self._iter_count == 0):
            self._clock_start_time = time.time()
        if dt is None:
            dt = self.dt
        else:
            self.dt = dt
        # Advance model forward one large step
        self._step(dt=dt, **kwargs)
        # NOTE: This is stored after stepping, because of the way save state works
        if retries:
            initial_state = copy.deepcopy(self.model.states)
        else:
            initial_state = self.model.states
        err = None
        # If using adaptive time-stepping...
        if subdivisions > 1:
            # Copy coarse-stepped estimate of state
            states_coarse = np.copy(self.model.H_j)
            # Load previous state
            self.load_state(initial_state)
            # Advance model forward with number of steps given by subdivisions
            for _ in range(subdivisions):
                self._step(dt=dt / subdivisions, **kwargs)
            # Copy fine-stepped estimate of state
            states_fine = np.copy(self.model.H_j)
            # TODO: Is there a way to generalize this error metric?
            raw_err = states_coarse - states_fine
            scaled_err = self._scaled_error(raw_err)
            err = self._normed_error(scaled_err, norm=norm)
        # Set instance variables
        self.dt = dt
        self.dts.appendleft(dt)
        self.err = err
        self.errs.appendleft(err)
        # TODO: This will not save the dt needed for the next step
        if ((retries) and (err is not None)):
            min_dt = self.min_dt
            # dt = self.compute_step_size(dt, tol=tol, err=err)
            # dt = self.filter_step_size(tol=tol, coeffs=coeffs,
            #                            safety_factor=safety_factor)
            dt = 0.5 * dt
            if ((err > tol) or (not np.isfinite(err))) and (dt > min_dt):
                self.dts.popleft()
                self.errs.popleft()
                self.load_state(initial_state)
                self.step(dt=dt, subdivisions=subdivisions, retries=retries-1, **kwargs)
        assert np.isfinite(self.model.H_j).all()
        self._iter_count += 1
Example No. 32
S = None

for file_name in args.source:
    print('Source file: {s}'.format(s=file_name))
    with open(file_name, newline='') as csvfile:
        reader = csv.reader(csvfile)
        rid = 0
        count = 0
        for row in reader:
            rid = rid + 1
            if rid > 2 or (rid > 1 and args.pareto_in):
                values = np.asarray(list(map(float, row)))
                if args.pareto_in:
                    if len(values) == 30: # oops - missing the Nr column
                        X    = np.copy(values[1:])
                        X[0] = args.rings
                    else:
                        X    = values[2:]
                    cost   = values[0:2]
                elif args.without_Nc:
                    X      = values[4:]
                    cost   = np.asarray([values[3], values[2] * 1E3])
                else:
                    if args.filter_hybrid and values[5] - values[4] < 1.5:
                        # For purely lamella designs, Nc = Nr - 1
                        continue
                    X      = values[5:]
                    cost   = np.asarray([values[3], values[2] * 1E3])

                if args.rings > 0:
Example No. 33
        elif val < minimun_val and start_i is not None:
            end_i = i
            if end_i - start_i >= minimun_range:
                peek_ranges.append((start_i, end_i))
            start_i = None
            end_i = None
        elif val < minimun_val and start_i is None:
            pass
        else:
            raise ValueError("cannot parse this case...")
    return peek_ranges


peek_ranges = extract_peek_ranges_from_array(horinzontal_sum)

line_seg_adaptive_threshold = np.copy(adaptive_threshold)
for i, peek_range in enumerate(peek_ranges):
    x = 0
    y = peek_range[0]
    w = line_seg_adaptive_threshold.shape[1]
    h = peek_range[1] - y
    pt1 = (x, y)
    pt2 = (x + w, y + h)
    cv2.rectangle(line_seg_adaptive_threshold, pt1, pt2, 255)
cv2.imshow('line image', line_seg_adaptive_threshold)
cv2.waitKey(0)


def median_split_ranges(peek_ranges):
    new_peek_ranges = []
    widthes = []
Example No. 34
 def get_groundstate(self):
     return np.copy(self.groundstate)
Example No. 35

# Read data from a csv file with the pandas module
df = pd.read_csv(
    'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
    header=None)
df.tail()

# select setosa and versicolor
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)

# extract sepal length and petal length
X = df.iloc[0:100, [0, 2]].values

X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)


def plot_decision_regions(X, y, classifier, resolution=0.02):

    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
Example No. 36
                e_map[i, j] += min_cost
                backtrack[i, j] = j + min_idx - 1
    return (e_map, backtrack)


# example to reduce a defined number of pixels in an image
# the range for the for loop will be the number of these pixels.
# For each pixel to be removed:
# 1-Calculate energy of image
# 2-Calculate seam cost forward
# 3-Find minimum seam
# 4-Draw seam
# 5-Remove seam and traverse pixels left
t0 = time.time()
path = os.path.join(os.getcwd(), 'fig5')  # replace fig5 with the actual path
img = np.copy(fig5)
for c in range(350):  # 350 denotes the amount of pixels to be reduced from the image
    energy_map = calc_img_energy(img)
    energy_map_forward, backtrack = calc_seam_cost_forward(energy_map)
    (min_seam, cost) = find_min_seam(energy_map_forward, backtrack)
    bgr_img_with_seam = draw_seam(img, min_seam)
    # can remove this to not store every seam image
    cv2.imwrite('%s%s.png' % (path, c), bgr_img_with_seam)
    img = remove_seam(img, min_seam)
# output image stored separately
cv2.imwrite('%sfig5_resized.png' % (path), img)
t1 = time.time()
total = t1-t0
# ISSUE!!! takes up to 4-5 minutes to remove 100 pixels
print("Total Time: %d" % total)
Example No. 37
 def __init__(self, model, Q_in=None, H_bc=None, Q_Ik=None, t_start=None,
              t_end=None, dt=None, max_iter=None, min_dt=1, max_dt=200,
              tol=0.01, min_rel_change=1e-10, max_rel_change=1e10, safety_factor=0.9,
              Qcov=None, Rcov=None, C=None, H=None, interpolation_method='linear'):
     self.model = model
     if Q_in is not None:
         self.Q_in = Q_in.copy(deep=True)
         self.Q_in = self.Q_in.iloc[:, model.permutations]
         self.Q_in.index = self.Q_in.index.astype(float)
     else:
         self.Q_in = Q_in
     if H_bc is not None:
         self.H_bc = H_bc.copy(deep=True)
         self.H_bc = self.H_bc.iloc[:, model.permutations]
         self.H_bc.index = self.H_bc.index.astype(float)
     else:
         self.H_bc = H_bc
     if Q_Ik is not None:
         self.Q_Ik = Q_Ik.copy(deep=True)
         self.Q_Ik.index = self.Q_Ik.index.astype(float)
     else:
         self.Q_Ik = Q_Ik
     self.inputs = (self.Q_in, self.H_bc, self.Q_Ik)
     any_inputs = any(inp is not None for inp in self.inputs)
     if dt is None:
         dt = model._dt
     self.dt = dt
     self.max_iter = max_iter
     # Sample interpolation method
     if interpolation_method.lower() == 'linear':
         self.interpolation = 1
     elif interpolation_method.lower() == 'nearest':
         self.interpolation = 0
     else:
         raise ValueError('Argument `interpolation_method` must be one of `linear` or `nearest`.')
     # Adaptive step size handling
     self.err = None
     self.min_dt = min_dt
     self.max_dt = max_dt
     self.tol = tol
     self.min_rel_change = min_rel_change
     self.max_rel_change = max_rel_change
     self.safety_factor = safety_factor
     # Add queue of dts
     self.dts = deque([dt], maxlen=3)
     self.errs = deque([eps], maxlen=3)
     self.h0100 = [1, 0, 0, 0, 0]
     self.h0211 = [1/2, 1/2, 0, 1/2, 0]
     self.h211  = [1/6, 1/6, 0, 0, 0]
     self.h0312 = [1/4, 1/2, 1/4, 3/4, 1/4]
     self.h312  = [1/18, 1/9, 1/18, 0, 0]
     self.h0321 = [5/4, 1/2, -3/4, -1/4, -3/4]
     self.h321  = [1/3, 1/18, -5/18, -5/6, -1/6]
     # Boundary conditions for convenience
     self.bc = self.model.bc
     # TODO: This needs to be generalized
     self.state_variables = {'H_j' : 'j',
                             'h_Ik' : 'Ik',
                             'Q_ik' : 'ik',
                             'Q_uk' : 'k',
                             'Q_dk' : 'k',
                             'Q_o' : 'o',
                             'Q_w' : 'w',
                             'Q_p' : 'p',
                             'x_Ik' : 'Ik'}
     if t_start is None:
         if any_inputs:
             self.t_start = min(i.index.min() for i in self.inputs if i is not None)
             self.model.t = self.t_start
         else:
             self.t_start = model.t
     else:
         self.t_start = t_start
     if t_end is None:
         if any_inputs:
             self.t_end = max(i.index.max() for i in self.inputs if i is not None)
         else:
             self.t_end = np.inf
     else:
         self.t_end = t_end
     # Configure kalman filtering
     if Rcov is None:
         self.Rcov = np.zeros((model.M, model.M))
     elif np.isscalar(Rcov):
         self.Rcov = Rcov * np.eye(model.M)
     elif (Rcov.shape[0] == Rcov.size):
         assert isinstance(Rcov, np.ndarray)
         self.Rcov = np.diag(Rcov)
     else:
         assert isinstance(Rcov, np.ndarray)
         self.Rcov = Rcov
     if Qcov is None:
         self.Qcov = np.zeros((model.M, model.M))
     elif np.isscalar(Qcov):
         self.Qcov = Qcov * np.eye(model.M)
     elif (Qcov.shape[0] == Qcov.size):
         assert isinstance(Qcov, np.ndarray)
         self.Qcov = np.diag(Qcov)
     else:
         assert isinstance(Qcov, np.ndarray)
         self.Qcov = Qcov
     if C is None:
         self.C = np.eye(model.M)
     elif np.isscalar(C):
         self.C = C * np.eye(model.M)
     elif (C.shape[0] == C.size):
         assert isinstance(C, np.ndarray)
         self.C = np.diag(C)
     else:
         assert isinstance(C, np.ndarray)
         self.C = C
     if H is None:
         self.H = np.eye(model.M)
     elif np.isscalar(H):
         self.H = H * np.eye(model.M)
     elif (H.shape[0] == H.size):
         assert isinstance(H, np.ndarray)
         self.H = np.diag(H)
     else:
         assert isinstance(H, np.ndarray)
         self.H = H
     self.P_x_k_k = self.C @ self.Qcov @ self.C.T
     # Progress bar checkpoints
     if np.isfinite(self.t_end):
         self._checkpoints = np.linspace(self.t_start, self.t_end)
     else:
         self._checkpoints = np.array([np.inf])
     self._checkpoint_num = 0
     self._iter_count = 0
     self._clock_start_time = 0
     self._clock_current_time = 0
     # Create a sequence iterator
     if max_iter is None:
         self.steps = count()
     else:
         self.steps = range(max_iter)
     self.states = States()
     for state in self.state_variables:
         if state in self.model.states:
             setattr(self.states, state, {})
             getattr(self.states, state).update({float(model.t) :
                                                 np.copy(model.states[state])})
Example No. 38
    def __init__(self, weights, thresh_factor=1.0):

        self.weights = check_float(weights)
        self.original_weights = np.copy(self.weights)
        self.thresh_factor = check_float(thresh_factor)
        self._rw_num = 1
Example No. 39
    def generate_paths(self,NumSimulations, NumSteps, Expiry, F_0):
        """
        Doing only Euler Discretisation with zero absorbing boundary \n
        No Antithetic Variates \n
        Number of Simulations: \n
        Number of TimeSteps: \n
        Expiry: \n
        Initial value of F_0: \n
        
        """
            
        dt = float(Expiry) / float(NumSteps)
        dt_sqrt = np.sqrt(dt)
        
        pathContainer = np.zeros((NumSimulations,NumSteps + 1) )
        pathContainer[:,0] = F_0                                 #Fill up the first column with initial value of F_0;

                     
        Updating_F = np.copy(pathContainer[:,0])
        Updating_a = np.ones(NumSimulations)*self.a
        
        correlationMatrix = np.array([[1.0, self.rho] , [self.rho , 1]])
        MatrixDecompositions.draw_N_randomNumbers(correlationMatrix)
        
                  
        for i in range(1,NumSteps + 1):
    
            for j in range(NumSimulations):

                #print "time: {} , Simulation: {}".format(i,j)
                #print "pathContainer j: {} , i: {}".format(j,i)
                
#                if ((self.beta > 0 and self.beta < 1) and F_t <= 0):
#                    F_t = 0
#                    pathContainer[i,j] = F_t
#                else:
#                    rand = MatrixDecompositions.draw_N_randomNumbers(correlationMatrix)
#                    dW_F = dt_sqrt * rand[0]
#                    
#                    Updating_F[j + 1] = Updating_F[j] + ( Updating_a[j] * np.power(abs(Updating_F[j]),self.beta) * dW_F)
#                    pathContainer[i,j] = Updating_F[j + 1]
#                    
#                    dW_a = dt_sqrt * rand[1]
#                    Updating_a[j + 1] = Updating_a[j] + (self.nu*Updating_a[j]*dW_a)
                
                rand = MatrixDecompositions.draw_N_randomNumbers(correlationMatrix)
                dW_F = dt_sqrt * rand[0]
                
                old_Fj = Updating_F[j]
                Updating_F[j] = Updating_F[j] + ( Updating_a[j] * np.power(abs(Updating_F[j]),self.beta) * dW_F)
                probability = np.exp( -2.0*old_Fj*Updating_F[j] / (Updating_a[j]*Updating_a[j]*np.power(Updating_F[j],2*self.beta)*dt)        )
                if ( (Updating_F[j] > 0.0) and (np.random.uniform(0,1) > probability) ):
                    pathContainer[j,i] = Updating_F[j]
                else:
                    pathContainer[j,i] = 0.0
                    
                dW_a = dt_sqrt * rand[1]
                Updating_a[j] = Updating_a[j]*np.exp(self.nu*dt_sqrt*dW_a - 0.5*self.nu*self.nu*dt)
                
                
        return pathContainer
Example No. 40
def plot_car(x, y, yaw, steer=0.0, cabcolor="-r", truckcolor="-k", alpha = 1):

    # Vehicle parameters
    LENGTH = 0.4  # [m]
    WIDTH = 0.2  # [m]
    BACKTOWHEEL = 0.1  # [m]
    WHEEL_LEN = 0.03  # [m]
    WHEEL_WIDTH = 0.02  # [m]
    TREAD = 0.07  # [m]
    WB = 0.25  # [m] wheel base

    outline = np.array([[-BACKTOWHEEL, (LENGTH - BACKTOWHEEL), (LENGTH - BACKTOWHEEL),
                         -BACKTOWHEEL, -BACKTOWHEEL],
                        [WIDTH / 2, WIDTH / 2, - WIDTH / 2, -WIDTH / 2, WIDTH / 2]])

    fr_wheel = np.array([[WHEEL_LEN, -WHEEL_LEN, -WHEEL_LEN, WHEEL_LEN, WHEEL_LEN],
                         [-WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD, WHEEL_WIDTH -
                          TREAD, WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD]])


    rr_wheel = np.copy(fr_wheel)

    fl_wheel = np.copy(fr_wheel)
    fl_wheel[1, :] *= -1
    rl_wheel = np.copy(rr_wheel)
    rl_wheel[1, :] *= -1

    # Rotates the body and back wheels
    Rot1 = np.array([[math.cos(yaw), math.sin(yaw)],
                     [-math.sin(yaw), math.cos(yaw)]])

    # Rotates the front wheels
    Rot2 = np.array([[math.cos(steer), math.sin(steer)],
                     [-math.sin(steer), math.cos(steer)]])

    # Translate wheels to origin, do steering rotation, translate wheels back
    fr_wheel[1, :] += 0.07
    fl_wheel[1, :] -= 0.07

    fr_wheel = (fr_wheel.T.dot(Rot2)).T
    fl_wheel = (fl_wheel.T.dot(Rot2)).T

    # Translate wheels to correct positions
    fr_wheel[1, :] -= 0.07
    fl_wheel[1, :] += 0.07

    fr_wheel[0, :] += WB
    fl_wheel[0, :] += WB

    fr_wheel = (fr_wheel.T.dot(Rot1)).T
    fl_wheel = (fl_wheel.T.dot(Rot1)).T

    outline = (outline.T.dot(Rot1)).T
    rr_wheel = (rr_wheel.T.dot(Rot1)).T
    rl_wheel = (rl_wheel.T.dot(Rot1)).T

    scale_by = 15

    outline[0, :] *= scale_by
    outline[0, :] += x

    outline[1, :] *= scale_by
    outline[1, :] += y

    fr_wheel[0, :] *= scale_by
    fr_wheel[0, :] += x

    fr_wheel[1, :] *= scale_by
    fr_wheel[1, :] += y

    rr_wheel[0, :] *= scale_by
    rr_wheel[0, :] += x

    rr_wheel[1, :] *= scale_by
    rr_wheel[1, :] += y

    fl_wheel[0, :] *= scale_by
    fl_wheel[0, :] += x

    fl_wheel[1, :] *= scale_by
    fl_wheel[1, :] += y

    rl_wheel[0, :] *= scale_by
    rl_wheel[0, :] += x

    rl_wheel[1, :] *= scale_by
    rl_wheel[1, :] += y


    plt.plot(np.array(outline[0, :]).flatten(),
             np.array(outline[1, :]).flatten(), truckcolor, alpha = alpha)
    plt.plot(np.array(fr_wheel[0, :]).flatten(),
             np.array(fr_wheel[1, :]).flatten(), truckcolor, alpha = alpha)
    plt.plot(np.array(rr_wheel[0, :]).flatten(),
             np.array(rr_wheel[1, :]).flatten(), truckcolor, alpha = alpha)
    plt.plot(np.array(fl_wheel[0, :]).flatten(),
             np.array(fl_wheel[1, :]).flatten(), truckcolor, alpha = alpha)
    plt.plot(np.array(rl_wheel[0, :]).flatten(),
             np.array(rl_wheel[1, :]).flatten(), truckcolor, alpha = alpha)
    plt.plot(x, y, "*", alpha = alpha)
Example No. 41
def OoM(x):
    x = np.copy(x)
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    x[x==0] = 1                       # if zero, make OoM 0
    return np.floor(np.log10(np.abs(x)))
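A quick check of the behavior, including the zero special case noted in the comment:

import numpy as np
print(OoM(np.array([0.003, 42.0, 0.0])))  # [-3.  1.  0.]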
Example No. 42
    rom40fl = velocity_field(xt,yt,2.0*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)

    rom15 = np.insert(rom15,0,rom15f)
    rom20 = np.insert(rom20,0,rom20f)
    rom25 = np.insert(rom25,0,rom25f)
    rom30 = np.insert(rom30,0,rom30f)
    rom35 = np.insert(rom35,0,rom35f)
    rom40f = np.insert(rom40f,0,rom40ff)
    rom15 = np.append(rom15,rom15l)
    rom20 = np.append(rom20,rom20l)
    rom25 = np.append(rom25,rom25l)
    rom30 = np.append(rom30,rom30l)
    rom35 = np.append(rom35,rom35l)
    rom40f = np.append(rom40f,rom40fl)

    x15p = np.copy(x15)
    x20p = np.copy(x20)
    x25p = np.copy(x25)
    x30p = np.copy(x30)
    x35p = np.copy(x35)
    x40p = np.copy(x40)

    x15p = np.insert(x15p,0,-1.)
    x20p = np.insert(x20p,0,-1.)
    x25p = np.insert(x25p,0,-1.)
    x30p = np.insert(x30p,0,-1.)
    x35p = np.insert(x35p,0,-1.)
    x40p = np.insert(x40p,0,-1.)
    x15p = np.append(x15p,1.)
    x20p = np.append(x20p,1.)
    x25p = np.append(x25p,1.)
Example No. 43
 def record_timestep(self, t, U, F,
                     state_vars=None):
     super(ShearZoneHist, self).record_timestep(t, U, F, state_vars)
     self.x_t_Ia.append(np.copy(self.tstep_source.xd.x_t_Ia))
     for rt in self.record_traits:
         self.record_t[rt].append(getattr(self.tstep_source, rt))
def run_pi_x_sq_trotter(x_max=5.,
                        nx=201,
                        N_iter=7,
                        beta_fin=4,
                        potential=harmonic_potential,
                        potential_string='harmonic_potential',
                        print_steps=True,
                        save_data=True,
                        file_name=None,
                        relevant_info=None,
                        plot=True,
                        save_plot=True,
                        show_plot=True):
    """
    Usage:  runs the matrix squaring algorithm iteratively (N_iter times). The first
            iteration uses a density matrix in the Trotter approximation at inverse
            temperature beta_ini = beta_fin * 2**(-N_iter) for the potential given by
            potential; each subsequent iteration uses the density matrix generated by
            the immediately preceding one. The function also saves pi(x;beta) vs. x
            data to a text file and plots pi(x;beta), comparing it with theory for the
            quantum harmonic oscillator.

    Receives:
        x_max: float        ->  the x values lie in the interval (-x_max, x_max).
        nx: int             ->  number of x values considered.
        N_iter: int         ->  number of iterations of the matrix squaring algorithm.
        beta_fin: float     ->  inverse temperature we want to reach at the end of
                                applying the matrix squaring algorithm iteratively.
        potential: func     ->  interaction potential used in the Trotter approximation.
                                Must be a function of x.
        potential_string: str   ->  name of the potential (used to name the files
                                    that are generated).
        print_steps: bool   ->  whether to print the steps of the matrix squaring algorithm.
        save_data: bool     ->  whether to save the data to a .csv file.
        plot: bool          ->  whether to plot.
        save_plot: bool     ->  whether to save the figure.
        show_plot: bool     ->  whether to show the figure on screen.

    Returns:
        rho: numpy array, shape=(nx,nx)     ->  density matrix rho at inverse
                                                temperature beta_fin.
        trace_rho: float                    ->  trace of the density matrix at inverse
                                                temperature beta_fin. With the definition
                                                we take for "rho", this is equivalent to
                                                the partition function at that temperature.
        grid_x: numpy array, shape=(nx,)    ->  x values at which rho is evaluated.
    """
    # Compute beta_ini from the beta_fin and N_iter values given as input
    beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho with the Trotter approximation
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho by matrix squaring iterated N_iter times.
    rho, trace_rho, beta_fin_2 = density_matrix_squaring(
        rho, grid_x, N_iter, beta_ini, print_steps)
    print('----------------------------------------------------------------' +
          '--------------------------------------------------------\n'
          u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f' %
          (beta_ini, beta_fin_2) +
          u'   N_iter = %d   Z(beta_fin) = Tr(rho(beta_fin)) = %.3E' %
          (N_iter, trace_rho))
    # Normalize rho to 1 and compute probability densities at the grid_x values.
    rho_normalized = np.copy(rho) / trace_rho
    x_weights = np.diag(rho_normalized)
    # Save the data to a .csv file.
    script_dir = os.path.dirname(
        os.path.abspath(__file__))  # full path for this script
    if save_data == True:
        # Name of the .csv file in which we save the pi(x;beta_fin) values.
        if file_name is None:
            csv_file_name = script_dir+u'/pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'\
                                            %(potential_string,beta_fin,x_max,nx,N_iter)
        else:
            csv_file_name = script_dir + '/' + file_name
        # Relevant information to add as a comment in the csv file.
        if relevant_info is None:
            relevant_info = [   'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
                                ' Trotter approximation. Parameters:',
                                u'%s   x_max = %.3f   nx = %d   '%(potential_string,x_max,nx) + \
                                u'N_iter = %d   beta_ini = %.3f   '%(N_iter,beta_ini,) + \
                                u'beta_fin = %.3f'%beta_fin ]
        # Save the pi(x;beta_fin) values to the csv file.
        pi_x_data = [grid_x.copy(), x_weights.copy()]
        pi_x_data_headers = ['position_x', 'prob_density']
        pi_x_data = save_csv(pi_x_data,
                             pi_x_data_headers,
                             csv_file_name,
                             relevant_info,
                             print_data=0)

    # Plot and comparison with theory
    if plot == True:
        plt.figure(figsize=(8, 5))
        plt.plot(
            grid_x,
            x_weights,
            label=
            'Matrix squaring +\nTrotter formula.\n$N=%d$ iterations\n$dx=%.3E$'
            % (N_iter, dx))
        plt.plot(grid_x,
                 QHO_canonical_ensemble(grid_x, beta_fin),
                 label=u'Theoretical QHO value')
        plt.xlabel(u'x')
        plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
        plt.legend(loc='best', title=u'$\\beta=%.2f$' % beta_fin)
        plt.tight_layout()
        if save_plot == True:
            if file_name is None:
                plot_file_name = script_dir + u'/pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps' % (
                    potential_string, beta_fin, x_max, nx, N_iter)
            else:
                plot_file_name = script_dir + u'/pi_x-ms-plot-' + file_name + '.eps'
            plt.savefig(plot_file_name)
        if show_plot == True:
            plt.show()
        plt.close()
    return rho, trace_rho, grid_x
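# A minimal, self-contained sketch of the matrix-squaring recursion used above
# (hypothetical names; the real rho_trotter/density_matrix_squaring helpers are
# defined elsewhere in this file). Assumes a harmonic potential and hbar = m = 1.
import numpy as np

def rho_trotter_sketch(x_max=5.0, nx=201, beta=2.0**-6):
    grid_x = np.linspace(-x_max, x_max, nx)
    dx = grid_x[1] - grid_x[0]
    V = 0.5 * grid_x**2                                    # harmonic potential
    # free-particle density matrix <x|exp(-beta p^2/2)|x'>
    rho_free = np.exp(-(grid_x[:, None] - grid_x[None, :])**2 / (2.0 * beta)) \
               / np.sqrt(2.0 * np.pi * beta)
    # symmetric Trotter splitting: exp(-beta V/2) rho_free exp(-beta V/2)
    expV = np.exp(-0.5 * beta * V)
    return expV[:, None] * rho_free * expV[None, :], grid_x, dx

rho_s, grid_s, dx_s = rho_trotter_sketch()
beta_s = 2.0**-6
for _ in range(6):                   # each squaring doubles the inverse temperature
    rho_s = dx_s * (rho_s @ rho_s)   # rho(x,x'';2b) = int dx' rho(x,x';b) rho(x',x'';b)
    beta_s *= 2.0
Z_s = np.trace(rho_s) * dx_s         # partition function Z = Tr(rho) * dx
pi_x_s = np.diag(rho_s) / Z_s        # probability density pi(x;beta_s) on grid_s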
Exemplo n.º 45
0
cu = np.empty((q,xsize,ysize))
#u = np.empty((2,xsize,ysize))
#u = np.zeros((2,xsize,ysize))
u = np.zeros((2,xsize,ysize), dtype=np.float32)
u1 = np.empty((2,xsize,ysize))
fplus = np.empty((q,xsize,ysize))
fminus = np.empty((q,xsize,ysize))
feplus = np.empty((q,xsize,ysize))
feminus = np.empty((q,xsize,ysize))
#ftemp = np.empty((q,xsize,ysize))
#fin = np.zeros((q,xsize,ysize))
fin = np.zeros((9,xsize,ysize), dtype=np.float32)
ftemp = np.zeros((9,xsize,ysize), dtype=np.float32)
fin1 = np.empty((q,xsize,ysize))
rho1 = np.empty((xsize,ysize))
fpost = np.copy(fin)
u_past = u.copy()
#rho = np.ones((xsize,ysize))
rho = np.ones((xsize,ysize), dtype=np.float32)
rhob = rho[np.newaxis,:,:]
usqr = np.empty((xsize,ysize))
usqrb = np.empty((q,xsize,ysize))

#@nmb.jit(nmb.f8[:,:,:](nmb.f8[:,:], nmb.f8[:,:,:]))
#@cache(maxsize=None)
def equ(rho,u):
    #cu   = dot(c, u.transpose(1, 0, 2))
    # peeling loop to increase speed
    cu[0] = (c[0,0]*u[0] + c[0,1]*u[1])
    cu[1] = (c[1,0]*u[0] + c[1,1]*u[1])
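# The function above is cut off in this excerpt. For reference, a sketch of the
# standard D2Q9 equilibrium distribution such an equ typically computes; the
# lattice velocities c and weights t used here are assumptions (they are defined
# outside this excerpt in the original script).
import numpy as np

c_sk = np.array([(x, y) for x in [0, -1, 1] for y in [0, -1, 1]])  # 9 velocities
t_sk = np.full(9, 1.0 / 36.0)                          # diagonal-direction weight
t_sk[np.linalg.norm(c_sk, axis=1) < 1.1] = 1.0 / 9.0   # axis-direction weight
t_sk[0] = 4.0 / 9.0                                    # rest-particle weight

def equilibrium_sketch(rho, u):
    # f_eq_i = t_i * rho * (1 + 3 c_i.u + 9/2 (c_i.u)^2 - 3/2 |u|^2)
    cu = 3.0 * np.einsum('id,dxy->ixy', c_sk, u)
    usqr = 1.5 * (u[0]**2 + u[1]**2)
    feq = np.empty((9,) + rho.shape)
    for i in range(9):
        feq[i] = rho * t_sk[i] * (1.0 + cu[i] + 0.5 * cu[i]**2 - usqr)
    return feq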
Exemplo n.º 46
0
    def get_samples(self, sampleIndices, featVect_orig, numSamples=10):
        '''
        Input:  featVect_orig   the complete feature vector
                sampleIndices   the raveled(!) indices which we want to sample
                numSamples      how many samples to draw
        '''

        featVect = np.copy(featVect_orig)

        # to avoid mistakes, remove the feature values of the part that we want to sample
        featVect.ravel()[sampleIndices.ravel()] = 0

        # reshape inputs if necessary
        if np.ndim(sampleIndices) == 1:
            sampleIndices = sampleIndices.reshape(3, self.win_size,
                                                  self.win_size)
        if np.ndim(featVect) == 1:
            featVect = featVect.reshape(
                [3, self.image_dims[0], self.image_dims[1]])

        # get a patch surrounding the sample indices and the indices relative to that
        patch, patchIndices = self._get_surr_patch(featVect, sampleIndices)

        # For each color channel, we will conditionally sample pixel
        # values from a multivariate distribution

        samples = np.zeros((numSamples, 3, self.win_size * self.win_size))

        for c in [0, 1, 2]:

            patch_c = patch[c].ravel()
            patchIndices_c = patchIndices[c].ravel()

            # get the conditional mean and covariance
            if self.padding_size == 0:
                cond_mean = self.meanVects[c]
                cond_cov = self.covMat[c]
            else:
                cond_mean, cond_cov = self._get_cond_params(
                    patch_c, patchIndices_c, c)

            # sample from the conditional distribution

    #        samples = np.random.multivariate_normal(cond_mean, cond_cov, numSamples)
    # -- FASTER:
            dimGauss = self.win_size * self.win_size
            # --- (1) find real matrix A such that AA^T=Sigma ---
            A = np.linalg.cholesky(cond_cov)
            # --- (2) get (numSamples) samples from a standard normal ---
            z = np.random.normal(size=numSamples * dimGauss).reshape(
                dimGauss, numSamples)
            # --- (3) x=mu+Az ---
            samples[:, c] = cond_mean[np.newaxis, :] + np.dot(A, z).T

        samples = samples.reshape((numSamples, -1))

        # get the min/max values for this particular sample
        # (since the data is preprocessed these can be different for each pixel!)
        #print self.minMaxVals[0].shape
        minVals_sample = self.minMaxVals[0].ravel()[sampleIndices.ravel()]
        maxVals_sample = self.minMaxVals[1].ravel()[sampleIndices.ravel()]
        # clip the values
        for i in range(samples.shape[0]):
            samples[i][samples[i] < minVals_sample] = minVals_sample[
                samples[i] < minVals_sample]
            samples[i][samples[i] > maxVals_sample] = maxVals_sample[
                samples[i] > maxVals_sample]

        return samples
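# The Cholesky shortcut in steps (1)-(3) above is a standard way to draw many
# multivariate-normal samples cheaply; a self-contained sketch of the same idea:
import numpy as np

rng = np.random.default_rng(0)
dim, n_draws = 4, 200000
mu = rng.normal(size=dim)
M = rng.normal(size=(dim, dim))
sigma = M @ M.T + dim * np.eye(dim)              # a random SPD covariance matrix

A = np.linalg.cholesky(sigma)                    # (1) A such that A @ A.T == sigma
z = rng.standard_normal((dim, n_draws))          # (2) standard-normal draws
x = mu[:, None] + A @ z                          # (3) x = mu + A z  ~  N(mu, sigma)

assert np.allclose(np.cov(x), sigma, atol=0.5)   # sample covariance approaches sigma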
Exemplo n.º 47
0
def trainKeras(En, A, Cl, A_test, Cl_test, Root):
    import tensorflow as tf
    
    # Use this to restrict GPU memory allocation in TF
    opts = tf.GPUOptions(per_process_gpu_memory_fraction=sysDef.fractionGPUmemory)
    conf = tf.ConfigProto(gpu_options=opts)
    #conf.gpu_options.allow_growth = True
    
    if kerasDef.useTFKeras:
        import tensorflow.keras as keras  #tf.keras
        keras.backend.set_session(tf.Session(config=conf))  # register the session with tf.keras
    else:
        import keras   # pure keras
        from keras.backend.tensorflow_backend import set_session
        set_session(tf.Session(config=conf))
    
    from sklearn import preprocessing
    from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
    
    tb_directory = "keras_" + str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])
    model_directory = "."
    if kerasDef.regressor:
        model_name = model_directory+"/keras_regressor_"+str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])+".hd5"
    else:
        model_name = model_directory+"/keras_"+str(len(kerasDef.hidden_layers))+"HL_"+str(kerasDef.hidden_layers[0])+".hd5"
    model_le = model_directory+"/keras_model_le.pkl"
    
    if kerasDef.alwaysRetrain == False:
        print(" Training model saved in: ", model_name, "\n")
    else:
        kerasDef.alwaysImprove = False
        print(" Training model not saved\n")
    
    #**********************************************
    ''' Initialize Estimator and training data '''
    #**********************************************
    print(' Preprocessing data and classes for Keras\n')

    totA = np.vstack((A, A_test))
    totCl = np.append(Cl, Cl_test)

    
    if kerasDef.regressor:
        Cl2 = np.copy(Cl)
        Cl2_test = np.copy(Cl_test)
        le = None
    else:

        numTotClasses = np.unique(totCl).size
        le = preprocessing.LabelEncoder()
        totCl2 = le.fit_transform(totCl)
        Cl2 = le.transform(Cl)
        Cl2_test = le.transform(Cl_test)

        totCl2 = keras.utils.to_categorical(totCl2, num_classes=np.unique(totCl).size)
        Cl2 = keras.utils.to_categorical(Cl2, num_classes=np.unique(totCl).size+1)
        Cl2_test = keras.utils.to_categorical(Cl2_test, num_classes=np.unique(totCl).size+1)
        print(" Label Encoder saved in:", model_le)
        with open(model_le, 'wb') as f:
            pickle.dump(le, f)
    
    if kerasDef.fullBatch == True:
        batch_size = A.shape[0]
    else:
        batch_size = kerasDef.batchSize

    printParamKeras(A)

    if kerasDef.alwaysImprove == True or os.path.exists(model_name) is False:
        model = keras.models.Sequential()
        for numLayers in kerasDef.hidden_layers:
            model.add(keras.layers.Dense(numLayers,
                    activation = kerasDef.activation_function,
                    input_dim=A.shape[1],
                    kernel_regularizer=keras.regularizers.l2(kerasDef.l2_reg_strength)))
            model.add(keras.layers.Dropout(kerasDef.dropout_perc))

        if kerasDef.regressor:
            model.add(keras.layers.Dense(1))
            model.compile(loss='mse',
                optimizer=kerasDef.optimizer,
                metrics=['mae'])
        else:
            model.add(keras.layers.Dense(np.unique(totCl).size+1, activation = 'softmax'))
            model.compile(loss='categorical_crossentropy',
              optimizer=kerasDef.optimizer,
              metrics=['accuracy'])

        tbLog = keras.callbacks.TensorBoard(log_dir=tb_directory, histogram_freq=kerasDef.tbHistogramFreq,
                batch_size=batch_size, write_graph=True, write_grads=True, write_images=True)
        #tbLog.set_model(model)
        tbLogs = [tbLog]
        log = model.fit(A, Cl2,
            epochs=kerasDef.trainingSteps,
            batch_size=batch_size,
            callbacks = tbLogs,
            verbose = 2,
            validation_data=(A_test, Cl2_test))

        loss = np.asarray(log.history['loss'])
        val_loss = np.asarray(log.history['val_loss'])

        if kerasDef.regressor:
            accuracy = None
            val_acc = None
        else:
            accuracy = np.asarray(log.history['acc'])
            val_acc = np.asarray(log.history['val_acc'])

        model.save(model_name)

        if kerasDef.plotModel == True:
            from keras.utils import plot_model
            keras.utils.plot_model(model, to_file=model_directory+'/keras_MLP_model.png', show_shapes=True)
            
            import matplotlib.pyplot as plt
            plt.figure(tight_layout=True)
            plotInd = int(len(kerasDef.hidden_layers))*100+11
            visibleX = True
            for layer in model.layers:
                try:
                    w_layer = layer.get_weights()[0]
                    ax = plt.subplot(plotInd)
                    newX = np.arange(En[0], En[-1], (En[-1]-En[0])/w_layer.shape[0])
                    plt.plot(En, np.interp(En, newX, w_layer[:,0]), label=layer.get_config()['name'])
                    plt.legend(loc='upper right')
                    plt.setp(ax.get_xticklabels(), visible=visibleX)
                    visibleX = False
                    plotInd +=1
                except:
                    pass

            plt.xlabel('Raman shift [1/cm]')
            plt.legend(loc='upper right')
            plt.savefig('keras_weights_MLP' + '.png', dpi = 160, format = 'png')  # Save plot

        printModelKeras(model)

        print("\n  Number of spectra = ",A.shape[0])
        print("  Number of points in each spectra = ", A.shape[1])
        if kerasDef.regressor == False:
            print("  Number unique classes (training): ", np.unique(Cl).size)
            print("  Number unique classes (validation):", np.unique(Cl_test).size)
            print("  Number unique classes (total): ", np.unique(totCl).size)

        printParamKeras(A)
        printTrainSummary(accuracy, loss, val_acc, val_loss)
        
    else:
        print(" Retreaving training model from: ", model_name,"\n")
        model = keras.models.load_model(model_name)
        printModelKeras(model)
        printParamKeras(A)
    
    score = model.evaluate(A_test, Cl2_test, batch_size=batch_size)
    printEvalSummary(model_name, score)
    return model, le
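# A minimal sketch of the label preprocessing used above: LabelEncoder maps the
# class names to integers, and one-hot encoding (np.eye here stands in for
# keras.utils.to_categorical) turns them into softmax targets.
import numpy as np
from sklearn import preprocessing

Cl_demo = np.array(['water', 'oil', 'water', 'ethanol'])
le_demo = preprocessing.LabelEncoder()
Cl2_demo = le_demo.fit_transform(Cl_demo)             # e.g. [2, 1, 2, 0]
one_hot = np.eye(np.unique(Cl_demo).size)[Cl2_demo]   # shape (4, 3)
assert (le_demo.inverse_transform(Cl2_demo) == Cl_demo).all()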
Exemplo n.º 48
0
    def init_state(self):
        super(ShearZoneHist, self).init_state()
        self.x_t_Ia.append(np.copy(self.tstep_source.xd.x_t_Ia))
        for rt in self.record_traits:
            self.record_t[rt] = [0]
Exemplo n.º 49
0
def plot_1(ax, samples, inst, companion, style, 
           base=None, dt=None,
           zoomwindow=None, force_binning=False,
           kwargs_data=None,
           kwargs_model=None,
           kwargs_ax=None):
    '''
    Inputs:
    -------
    ax : matplotlib axis
    
    samples : array
        Prior or posterior samples to plot the fit from
    
    inst: str
        Name of the instrument (e.g. 'TESS')
        
    companion : None or str
        None or 'b'/'c'/etc.
        
    style: str
        'full' / 'per_transit' / 'phase' / 'phasezoom' / 'phasezoom_occ' /'phase_curve'
        'full_residuals' / 'phase_residuals' / 'phasezoom_residuals' / 'phasezoom_occ_residuals' / 'phase_curve_residuals'
    
    zoomwindow: int or float
        the full width of the window to zoom into (in hours)
        default: 8 hours
    
    base: a BASEMENT class object
        (for internal use only)
        
    dt : float
        time steps on which the model should be evaluated for plots
        in days
        default for style='full': 2 min for <1 day of data; 30 min for >1 day of data.
        
    Notes:
    ------
    yerr / epoch / period: 
        come either from
        a) the initial_guess value or 
        b) the MCMC median,
        depending on what is plotted (i.e. not from individual samples)

    '''

    #==========================================================================
    #::: interpret input
    #==========================================================================
    if base is None:
        base = config.BASEMENT
    
    if samples is not None:
        params_median, params_ll, params_ul = get_params_from_samples(samples)
    
    if kwargs_data is None: kwargs_data = {}
    if 'label' not in kwargs_data: kwargs_data['label'] = inst
    if 'marker' not in kwargs_data: kwargs_data['marker'] = '.'
    if 'markersize' not in kwargs_data: kwargs_data['markersize'] = 8.
    if 'linestyle' not in kwargs_data: kwargs_data['linestyle'] = 'none'
    if 'color' not in kwargs_data: kwargs_data['color'] = 'b'
    if 'alpha' not in kwargs_data: kwargs_data['alpha'] = 1.
    if 'rasterized' not in kwargs_data: kwargs_data['rasterized'] = True
    
    if kwargs_model is None: kwargs_model = {}
    if 'marker' not in kwargs_model: kwargs_model['marker'] = 'none'
    if 'markersize' not in kwargs_model: kwargs_model['markersize'] = 0.
    if 'linestyle' not in kwargs_model: kwargs_model['linestyle'] = '-'
    if 'color' not in kwargs_model: kwargs_model['color'] = 'r'
    if 'alpha' not in kwargs_model: kwargs_model['alpha'] = 1.
    
    if kwargs_ax is None: kwargs_ax = {}
    if 'title' not in kwargs_ax: kwargs_ax['title'] = None
    if 'xlabel' not in kwargs_ax: kwargs_ax['xlabel'] = None
    if 'ylabel' not in kwargs_ax: kwargs_ax['ylabel'] = None
    
    timelabel = 'Time' #removed feature
    
    if zoomwindow is None:
        zoomwindow = base.settings['zoom_window'] * 24. #user input is in days, convert here to hours
    
    
    #==========================================================================
    #::: helper fct
    #==========================================================================
    def set_title(title1):
        if kwargs_ax['title'] is None: return title1
        else: return kwargs_ax['title']
    
    
    #==========================================================================
    #::: do stuff
    #==========================================================================
    if inst in base.settings['inst_phot']:
        key='flux'
        baseline_plus = 1.
        if style in ['full']:
            ylabel = 'Relative Flux'
        elif style in ['full_minus_offset']:
            ylabel = 'Relative Flux - Offset'
        elif style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve']:
            ylabel = 'Relative Flux - Baseline'
        elif style in ['full_residuals', 'phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
            ylabel = 'Residuals'
            
    elif inst in base.settings['inst_rv']:
        key='rv'
        baseline_plus = 0.
        if style in ['full']:
            ylabel = 'RV (km/s)'
        elif style in ['full_minus_offset']:
            ylabel = 'RV (km/s) - Offset'
        elif style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve']:
            ylabel = 'RV (km/s) - Baseline'
        elif style in ['full_residuals', 'phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
            ylabel = 'Residuals'
            
    elif inst in base.settings['inst_rv2']:
        key='rv2'
        baseline_plus = 0.
        if style in ['full']:
            ylabel = 'RV (km/s)'
        elif style in ['full_minus_offset']:
            ylabel = 'RV (km/s) - Offset'
        elif style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve']:
            ylabel = 'RV (km/s) - Baseline'
        elif style in ['full_residuals', 'phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
            ylabel = 'Residuals'
        
    else:
        raise ValueError('inst should be: inst_phot, inst_rv, or inst_rv2...')
    
    
    if samples is not None:
        if samples.shape[0]==1:
            alpha = 1.
        else:
            alpha = 0.1
        


    #==========================================================================
    # guesstimate where the secondary eclipse / occultation is
    #==========================================================================
    e = params_median[companion+'_f_s']**2 + params_median[companion+'_f_c']**2
    w = np.mod( np.arctan2(params_median[companion+'_f_s'], params_median[companion+'_f_c']), 2*np.pi) #in rad, from 0 to 2*pi
    phase_shift = 0.5 * (1. + 4./np.pi * e * np.cos(w)) #in phase units; approximation from Winn2010
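    # Sanity check on the Winn (2010) approximation: for a circular orbit
    # (e = 0) this reduces to phase_shift = 0.5, i.e. the occultation falls
    # exactly half an orbital period after the transit.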
 
        
 
    #==========================================================================
    # full time series, not phased
    # plot the 'undetrended' data
    # plot each sampled model + its baseline 
    #==========================================================================
    if style in ['full', 'full_minus_offset', 'full_residuals']:
        
        #::: set it up
        x = base.data[inst]['time']
        
        if timelabel=='Time_since':
            x = np.copy(x)
            objttime = Time(x, format='jd', scale='utc')
            xsave = np.copy(x)
            x -= x[0]

        y = 1.*base.data[inst][key]
        yerr_w = calculate_yerr_w(params_median, inst, key)
        
        
        #::: remove offset only (if wished)
        if style in ['full_minus_offset']:
            baseline = calculate_baseline(params_median, inst, key)
            y -= np.median(baseline)
            
            
        #::: calculate residuals (if wished)
        if style in ['full_residuals']:
            model = calculate_model(params_median, inst, key)
            baseline = calculate_baseline(params_median, inst, key)
            stellar_var = calculate_stellar_var(params_median, 'all', key, xx=x)
            y -= model+baseline+stellar_var
            
            
        #::: plot data, not phase        
#        ax.errorbar(base.fulldata[inst]['time'], base.fulldata[inst][key], yerr=np.nanmedian(yerr_w), marker='.', linestyle='none', color='lightgrey', zorder=-1, rasterized=True ) 
        # ax.errorbar(x, y, yerr=yerr_w, marker=kwargs_data['marker'], markersize=kwargs_data['markersize'], linestyle=kwargs_data['linestyle'], color=kwargs_data['color'], alpha=kwargs_data['alpha'], capsize=0, rasterized=kwargs_data['rasterized'] )  
        ax.errorbar(x, y, yerr=yerr_w, capsize=0, **kwargs_data)  
        if base.settings['color_plot']:
            ax.scatter(x, y, c=x, marker='o', rasterized=kwargs_data['rasterized'], cmap='inferno', zorder=11 ) 
            
        if timelabel=='Time_since':
            ax.set(xlabel='Time since %s [days]' % objttime[0].isot[:10], ylabel=ylabel, title=set_title(inst))
        elif timelabel=='Time':
            ax.set(xlabel='Time (BJD)', ylabel=ylabel, title=set_title(inst))
            
            
        #::: plot model + baseline, not phased
        if (style in ['full','full_minus_offset']) and (samples is not None):
            
            #if <1 day of photometric data: plot with 2 min resolution
            if dt is None:
                if ((x[-1] - x[0]) < 1): 
                    dt = 2./24./60. 
                #else: plot with 30 min resolution
                else: 
                    dt = 30./24./60. 
                    
            if key == 'flux':
                xx_full = np.arange( x[0], x[-1]+dt, dt)
                Npoints_chunk = 48
                for i_chunk in tqdm(range(int(1.*len(xx_full)/Npoints_chunk)+2)):
                    xx = xx_full[i_chunk*Npoints_chunk:(i_chunk+1)*Npoints_chunk] #plot in chunks of 48 points (1 day)
                    if len(xx)>0 and any( (x>xx[0]) & (x<xx[-1]) ): #plot only where there is data
                        for i in range(samples.shape[0]):
                            s = samples[i,:]
                            p = update_params(s)
                            model = calculate_model(p, inst, key, xx=xx) #evaluated on xx (!)
                            baseline = calculate_baseline(p, inst, key, xx=xx) #evaluated on xx (!)
                            if style in ['full_minus_offset']:
                                baseline -= np.median(baseline)
                            stellar_var = calculate_stellar_var(p, 'all', key, xx=xx) #evaluated on xx (!)
                            ax.plot( xx, baseline+stellar_var+baseline_plus, 'k-', color='orange', alpha=alpha, zorder=12 )
                            ax.plot( xx, model+baseline+stellar_var, 'r-', alpha=alpha, zorder=12 )
            elif key in ['rv', 'rv2']:
                xx = np.arange( x[0], x[-1]+dt, dt)
                for i in range(samples.shape[0]):
                    s = samples[i,:]
                    p = update_params(s)
                    model = calculate_model(p, inst, key, xx=xx) #evaluated on xx (!)
                    baseline = calculate_baseline(p, inst, key, xx=xx) #evaluated on xx (!)
                    if style in ['full_minus_offset']:
                        baseline -= np.median(baseline)                    
                    stellar_var = calculate_stellar_var(p, 'all', key, xx=xx) #evaluated on xx (!)
                    ax.plot( xx, baseline+stellar_var+baseline_plus, 'k-', color='orange', alpha=alpha, zorder=12 )
                    ax.plot( xx, model+baseline+stellar_var, 'r-', alpha=alpha, zorder=12 )
        
        #::: other stuff
        if timelabel=='Time_since':
            x = np.copy(xsave)
            
            
            
            
    #==========================================================================
    # phase-folded time series
    # get a 'median' baseline from initial guess value / MCMC median result
    # detrend the data with this 'median' baseline
    # then phase-fold the 'detrended' data
    # plot each phase-folded model (without baseline)
    # Note: this is not ideal, as we overplot models with different epochs/periods/baselines onto a phase-folded plot
    #==========================================================================
    elif style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve',
                   'phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
        
        #::: data - baseline_median
        x = 1.*base.data[inst]['time']
        baseline_median = calculate_baseline(params_median, inst, key) #evaluated on x (!)
        stellar_var_median = calculate_stellar_var(params_median, 'all', key, xx=x) #evaluated on x (!)
        y = base.data[inst][key] - baseline_median - stellar_var_median
        yerr_w = calculate_yerr_w(params_median, inst, key)
        
        #::: zoom?
        if style in ['phasezoom', 'phasezoom_occ', 
                     'phasezoom_residuals', 'phasezoom_occ_residuals']: 
            zoomfactor = params_median[companion+'_period']*24.
        else: 
            zoomfactor = 1.
        
        
        #----------------------------------------------------------------------
        #::: Radial velocity
        #::: need to take care of multiple companions
        #----------------------------------------------------------------------
        if (inst in base.settings['inst_rv']) or (inst in base.settings['inst_rv2']):
            
            #::: get key
            if (inst in base.settings['inst_rv']): i_return = 0
            elif (inst in base.settings['inst_rv2']): i_return = 1
              
                
            #::: remove other companions
            for other_companion in base.settings['companions_rv']:
                if companion!=other_companion:
                    model = rv_fct(params_median, inst, other_companion)[i_return]
                    y -= model
            
            
            #::: calculate residuals (if wished)
            if style in ['phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
                model = rv_fct(params_median, inst, companion)[i_return]
                y -= model
                
                
            #::: plot data, phased        
            phase_time, phase_y, phase_y_err, _, phi = lct.phase_fold(x, y, params_median[companion+'_period'], params_median[companion+'_epoch'], dt = 0.002, ferr_type='meansig', ferr_style='sem', sigmaclip=False)    
            if (len(x) > 500) or force_binning:
                ax.plot( phi*zoomfactor, y, 'k.', color='lightgrey', rasterized=kwargs_data['rasterized'] ) #don't allow any other kwargs_data here
                ax.errorbar( phase_time*zoomfactor, phase_y, yerr=phase_y_err, capsize=0, zorder=11, **kwargs_data )
            else:
                ax.errorbar( phi*zoomfactor, y, yerr=yerr_w, capsize=0, zorder=11, **kwargs_data )      
            ax.set(xlabel='Phase', ylabel=ylabel, title=set_title(inst+', companion '+companion+' only'))
    
    
            #::: plot model, phased (if wished)
            if (style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve']) and (samples is not None):
                xx = np.linspace( -0.25, 0.75, 1000)
                xx2 = params_median[companion+'_epoch']+np.linspace( -0.25, 0.75, 1000)*params_median[companion+'_period']
                for i in range(samples.shape[0]):
                    s = samples[i,:]
                    p = update_params(s)
#                    p = update_params(s, phased=True)
                    model = rv_fct(p, inst, companion, xx=xx2)[i_return]
                    ax.plot( xx*zoomfactor, model, 'r-', alpha=alpha, zorder=12 )
            
        
        #----------------------------------------------------------------------
        #::: Photometry
        #----------------------------------------------------------------------
        elif (inst in base.settings['inst_phot']):
            
            #::: remove other companions
            for other_companion in base.settings['companions_phot']:
                if companion!=other_companion:
                    model = flux_fct(params_median, inst, other_companion)
                    y -= (model-1.)
                    
                    
            #::: calculate residuals (if wished)
            if style in ['phase_residuals', 'phasezoom_residuals', 'phasezoom_occ_residuals', 'phase_curve_residuals']:
                model = flux_fct(params_median, inst, companion)
                y -= model
                    
                
            #::: plot data, phased  
            if style in ['phase', 
                         'phase_residuals']:
                dt = 0.002
            elif style in ['phase_curve', 
                           'phase_curve_residuals']:
                dt = 0.01            
            elif style in ['phasezoom', 'phasezoom_occ', 
                           'phasezoom_residuals', 'phasezoom_occ_residuals']: 
                dt = 15./60./24. / params_median[companion+'_period']
                
            phase_time, phase_y, phase_y_err, _, phi = lct.phase_fold(x, y, params_median[companion+'_period'], params_median[companion+'_epoch'], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=False)    
            if (len(x) > 500) or force_binning:
                if style in ['phase_curve', 
                             'phase_curve_residuals']:
                    ax.plot( phase_time*zoomfactor, phase_y, 'b.', color=kwargs_data['color'], rasterized=kwargs_data['rasterized'], zorder=11 )                    
                else: 
                    ax.plot( phi*zoomfactor, y, 'b.', color='lightgrey', rasterized=kwargs_data['rasterized'], )
                    ax.errorbar( phase_time*zoomfactor, phase_y, yerr=phase_y_err, capsize=0, zorder=11, **kwargs_data )
            else:
                ax.errorbar( phi*zoomfactor, y, yerr=yerr_w, capsize=0, zorder=11, **kwargs_data )
                if base.settings['color_plot']:
                    ax.scatter( phi*zoomfactor, y, c=x, marker='o', rasterized=kwargs_data['rasterized'], cmap='inferno', zorder=11 )          
            ax.set(xlabel='Phase', ylabel=ylabel, title=set_title(inst+', companion '+companion))
    
    
            #::: plot model, phased (if wished)
            if style in ['phase', 'phasezoom', 'phasezoom_occ', 'phase_curve']:
                
                if style in ['phase', 'phase_curve']:
                    xx = np.linspace(-0.25, 0.75, 1000)
                    xx2 = params_median[companion+'_epoch'] + xx * params_median[companion+'_period']
                elif style in ['phasezoom']:
                    xx = np.linspace( -10./zoomfactor, 10./zoomfactor, 1000)
                    xx2 = params_median[companion+'_epoch'] + xx * params_median[companion+'_period']
                elif style in ['phasezoom_occ']:
                    xx = np.linspace( -10./zoomfactor + phase_shift, 10./zoomfactor + phase_shift, 1000 )
                    xx2 = params_median[companion+'_epoch'] + xx * params_median[companion+'_period']
    
                if samples is not None:
                    for i in range(samples.shape[0]):
                        s = samples[i,:]
                        p = update_params(s)
    #                    p = update_params(s, phased=True)
                        model = flux_fct(p, inst, companion, xx=xx2) #evaluated on xx (!)
                        ax.plot( xx*zoomfactor, model, 'r-', alpha=alpha, zorder=12 )
             
        
        #::: x-zoom?
        if style in ['phasezoom',
                     'phasezoom_residuals']:
            ax.set( xlim=[-zoomwindow/2.,zoomwindow/2.], xlabel=r'$\mathrm{ T - T_0 \ (h) }$' )
        elif style in ['phasezoom_occ',
                       'phasezoom_occ_residuals']:
            xlower = -zoomwindow/2. + phase_shift*params_median[companion+'_period']*24.
            xupper = zoomwindow/2. + phase_shift*params_median[companion+'_period']*24.
            ax.set( xlim=[xlower, xupper], xlabel=r'$\mathrm{ T - T_0 \ (h) }$' )
        
        
        #::: y-zoom onto occultation and phase variations
        if style in ['phasezoom_occ']:
            # try:
            buf = phase_y[phase_time>0.25] #TODO: replace with proper eclipse indexing
            def nanptp(arr): return np.nanmax(arr)-np.nanmin(arr)
            ax.set( ylim=[np.nanmin(buf)-0.1*nanptp(buf), np.nanmax(buf)+0.1*nanptp(buf)] )
            # except:
            #     ax.set( ylim=[0.999,1.0005] )
       
        if style in ['phase_curve', 
                     'phase_curve_residuals']:
            try:
                phase_curve_no_dips = flux_subfct_sinusoidal_phase_curves(params_median, inst, companion, np.ones_like(xx), xx=xx)
                ax.set(ylim=[np.min(phase_curve_no_dips)-0.1*np.ptp(phase_curve_no_dips), np.max(phase_curve_no_dips)+0.1*np.ptp(phase_curve_no_dips)])
            except Exception:
                ax.set( ylim=[0.999,1.001] )
Exemplo n.º 50
0
    weight_decay=weight_decay,
    max_lbfgs_iter=max_lbfgs_iter,
    num_classes=num_classes, 
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output',
    log_dir='log',
    model_name='spam_logreg')

tf_model.train()

X_train = np.copy(tf_model.data_sets.train.x)
Y_train = np.copy(tf_model.data_sets.train.labels)
X_test = np.copy(tf_model.data_sets.test.x)
Y_test = np.copy(tf_model.data_sets.test.labels) 


num_train_examples = Y_train.shape[0] 
num_flip_vals = 6
num_check_vals = 6
num_random_seeds = 40

dims = (num_flip_vals, num_check_vals, num_random_seeds, 3)
fixed_influence_loo_results = np.zeros(dims)
fixed_loss_results = np.zeros(dims)
fixed_random_results = np.zeros(dims)
Exemplo n.º 51
0
import json
import numpy as np
import rrcf

np.random.seed(0)
n = 100
d = 3
X = np.random.randn(n, d)
Z = np.copy(X)
Z[90:, :] = 1

tree = rrcf.RCTree(X)
duplicate_tree = rrcf.RCTree(Z)

tree_seeded = rrcf.RCTree(random_state=0)
duplicate_tree_seeded = rrcf.RCTree(random_state=np.random.RandomState(0))

deck = np.arange(n, dtype=int)
np.random.shuffle(deck)
indexes = deck[:5]


def test_batch():
    # Check stored bounding boxes and leaf counts after instantiating from batch
    branches = []
    tree.map_branches(tree.root, op=tree._get_nodes, stack=branches)
    leafcount = tree._count_leaves(tree.root)
    assert (leafcount == n)
    for branch in branches:
        leafcount = tree._count_leaves(branch)
        assert (leafcount == branch.n)
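# For context, rrcf trees are usually scored with collusive displacement
# (CoDisp); a short sketch, assuming rrcf.RCTree's codisp method:
import numpy as np
import rrcf

rng_demo = np.random.RandomState(1)
pts = rng_demo.randn(100, 3)
pts[-1] = [5, 5, 5]                                  # plant one obvious outlier
demo_tree = rrcf.RCTree(pts)
scores = {i: demo_tree.codisp(i) for i in range(100)}
# the planted outlier typically receives the largest CoDisp score
print(max(scores, key=scores.get))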
Exemplo n.º 52
0
    def _get_images(self, L, resize=None, apply_distortions=False, return_gray=False, PRINTING=False):
        pyDB = self.pyDB
        A = []
        for loc_idx, yr_idx, im_idx in L:
            loc = list(pyDB.keys())[loc_idx]
            yr_list = list(pyDB[ loc ].keys())
            yr = yr_list[ yr_idx ]
            im_list = pyDB[loc][yr]
            try:
                im_name = im_list[ im_idx ]

                # print loc_idx, yr_idx, im_idx
                file_name = self.TTM_BASE+'/images/'+im_name
                if PRINTING:
                    print('imread : ', file_name)
                # TODO blur before resizing
                if resize is None:
                    IM = cv2.imread( file_name )
                else:
                    IM = cv2.resize( cv2.imread( file_name ) , resize  )
                # IM = cv2.resize( cv2.imread( file_name ) , (160,120)  )
            except Exception:
                print('im_idx error', im_list)
                IM = np.zeros( (240, 320, 3) ).astype('uint8')

            # Random Distortion
            if apply_distortions == True and np.random.rand() > 0.5: #apply random distortions to only 50% of samples
                #TODO: Make use of RandomDistortions class (end of this file) for complicated Distortions, for now quick and dirty way
                # # Planar rotate IM, this rotation gives black-borders, need to crop
                # rows,cols, _ = IM.shape
                # irot = np.random.uniform(-180,180 )#np.random.randn() * 25.
                # M = cv2.getRotationMatrix2D((cols*.5,rows*.5),irot,1.)
                # dst = cv2.warpAffine(IM,M,(cols,rows))
                # IM = dst

                # Planar rotation, cropped. adopted from `test_rot-test.py`
                image_height, image_width = IM.shape[0:2]
                image_orig = np.copy(IM)
                irot = np.random.uniform(-180,180 )#np.random.randn() * 25.
                image_rotated = rotate_image(IM, irot)
                image_rotated_cropped = crop_around_center(
                    image_rotated,
                    *largest_rotated_rect(
                        image_width,
                        image_height,
                        math.radians(irot)
                    ))
                IM = cv2.resize( image_rotated_cropped, (320,240) )




            if return_gray == True:
                IM_gray = cv2.cvtColor( IM, cv2.COLOR_BGR2GRAY )
                IM = np.expand_dims( IM_gray, axis=2 )


            # A.append( IM[:,:,::-1] )
            A.append( IM )

        return np.array(A)
Exemplo n.º 53
0
plt.close()

# validation
# load the best model
model.load_weights(model_loc)
# evaluate model on validation set
model.evaluate(X_valid, y_valid, verbose=1)
# predict on training, validation, and testing sets
preds_train = model.predict(X_train, verbose=1)
preds_val = model.predict(X_valid, verbose=1)
# change cutoff
preds_train2 = np.zeros(np.shape(preds_train))
for n, pred in enumerate(preds_train):
    pred = (pred - pred.min())/(pred.max() - pred.min())  # rescale to [0, 1]
    preds_train2[n] = pred
preds_train = np.copy(preds_train2)

preds_val2 = np.zeros(np.shape(preds_val))
for n, pred in enumerate(preds_val):
    pred = (pred - pred.min())/(pred.max() - pred.min())  # rescale to [0, 1]
    preds_val2[n] = pred
preds_val = np.copy(preds_val2)
# threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)

train0_loc = directory + "train0.png"

plot_sample(X_train, y_train, preds_train, preds_train_t, ix=0, dim=0)
plt.savefig(train0_loc)
plt.clf()
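# The per-image rescaling above is ordinary min-max normalization; a defensive
# version (hypothetical helper) that also guards against constant predictions:
import numpy as np

def minmax_normalize(pred, eps=1e-12):
    span = pred.max() - pred.min()
    return (pred - pred.min()) / (span + eps)   # maps into [0, 1]

mask = (minmax_normalize(np.array([0.2, 0.9, 0.4])) > 0.5).astype(np.uint8)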
Exemplo n.º 54
0
    def rhs_equation(x, params, derivs):
        """
        Compute the ODEs
        """
        # pylint: disable=too-many-statements
        θ = scaled_to_rad(x, θ_scale)
        B_r = params[ODEIndex.B_r]
        B_φ = params[ODEIndex.B_φ]
        B_θ = params[ODEIndex.B_θ]
        v_r = params[ODEIndex.v_r]
        v_φ = params[ODEIndex.v_φ]
        ρ = params[ODEIndex.ρ]
        B_φ_prime = params[ODEIndex.B_φ_prime]
        η_O = params[ODEIndex.η_O]
        η_A = params[ODEIndex.η_A]
        η_H = params[ODEIndex.η_H]

        # check sanity of input values
        if ρ < 0:
            if store_internal:
                # pylint: disable=unsubscriptable-object
                problems[θ].append("negative density")
            return 1

        B_mag = sqrt(B_r**2 + B_φ**2 + B_θ**2)

        with errstate(invalid="ignore"):
            b_r, b_φ, b_θ = B_r / B_mag, B_φ / B_mag, B_θ / B_mag

        X = X_func(η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ)
        b = b_func(X=X, v_r=v_r)
        c = c_func(θ=θ,
                   norm_kepler_sq=norm_kepler_sq,
                   a_0=a_0,
                   X=X,
                   v_r=v_r,
                   B_φ=B_φ,
                   B_θ=B_θ,
                   B_r=B_r,
                   ρ=ρ,
                   η_A=η_A,
                   η_O=η_O,
                   η_H=η_H,
                   b_φ=b_φ,
                   b_r=b_r,
                   b_θ=b_θ)
        if (b**2 - 4 * c) < 0:
            log.warning("b = {}".format(b))
            log.warning("c = {}".format(c))
            log.error("Discriminant less than 0, = {}; θ = {}".format(
                b**2 - 4 * c, degrees(θ)))
            return 1
        elif __debug__:
            log.debug("b = {}".format(b))
            log.debug("c = {}".format(c))
            log.debug("Discriminant not less than 0, = {}".format(b**2 -
                                                                  4 * c))

        deriv_B_φ = B_φ_prime
        deriv_B_θ = B_θ * tan(θ) - 3 / 4 * B_r

        deriv_B_r = (B_φ * (η_A * b_φ * (b_r * tan(θ) - b_θ / 4) + η_H *
                            (b_r / 4 + b_θ * tan(θ))) - deriv_B_φ *
                     (η_H * b_θ + η_A * b_r * b_φ) -
                     v_r * B_θ) / (η_O + η_A * (1 - b_φ) * (1 + b_φ)) - B_θ / 4

        deriv_ρ = -ρ * v_φ**2 * tan(θ) - a_0 * (
            B_θ * B_r / 4 + B_r * deriv_B_r + B_φ * deriv_B_φ -
            B_φ**2 * tan(θ))

        if η_derivs:
            deriv_η_scale = deriv_η_skw_func(
                deriv_ρ=deriv_ρ,
                deriv_B_θ=deriv_B_θ,
                ρ=ρ,
                B_r=B_r,
                B_φ=B_φ,
                B_θ=B_θ,
                deriv_B_r=deriv_B_r,
                deriv_B_φ=deriv_B_φ,
            )
            deriv_η_O = deriv_η_scale * η_O_0
            deriv_η_A = deriv_η_scale * η_A_0
            deriv_η_H = deriv_η_scale * η_H_0
        else:
            deriv_η_O = 0
            deriv_η_A = 0
            deriv_η_H = 0

        deriv_b_r, deriv_b_φ, deriv_b_θ = B_unit_derivs(
            B_r=B_r,
            B_φ=B_φ,
            B_θ=B_θ,
            deriv_B_r=deriv_B_r,
            deriv_B_φ=deriv_B_φ,
            deriv_B_θ=deriv_B_θ,
            b_r=b_r,
            b_θ=b_θ,
            b_φ=b_φ,
        )

        C = C_func(η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ)

        A = A_func(η_O=η_O,
                   η_A=η_A,
                   η_H=η_H,
                   b_θ=b_θ,
                   b_r=b_r,
                   b_φ=b_φ,
                   deriv_η_O=deriv_η_O,
                   deriv_η_A=deriv_η_A,
                   deriv_η_H=deriv_η_H,
                   deriv_b_θ=deriv_b_θ,
                   deriv_b_r=deriv_b_r,
                   deriv_b_φ=deriv_b_φ)

        X_dash = X_dash_func(η_O=η_O,
                             η_A=η_A,
                             η_H=η_H,
                             b_θ=b_θ,
                             b_r=b_r,
                             b_φ=b_φ,
                             deriv_η_O=deriv_η_O,
                             deriv_η_A=deriv_η_A,
                             deriv_η_H=deriv_η_H,
                             deriv_b_θ=deriv_b_θ,
                             deriv_b_r=deriv_b_r,
                             deriv_b_φ=deriv_b_φ)

        Z_1 = Z_1_func(θ=θ,
                       a_0=a_0,
                       B_φ=B_φ,
                       B_θ=B_θ,
                       B_r=B_r,
                       ρ=ρ,
                       deriv_B_r=deriv_B_r,
                       deriv_B_φ=deriv_B_φ,
                       deriv_B_θ=deriv_B_θ,
                       v_φ=v_φ,
                       deriv_ρ=deriv_ρ)
        Z_2 = Z_2_func(θ=θ,
                       a_0=a_0,
                       X=X,
                       v_r=v_r,
                       B_φ=B_φ,
                       B_θ=B_θ,
                       B_r=B_r,
                       ρ=ρ,
                       η_A=η_A,
                       η_O=η_O,
                       η_H=η_H,
                       b_φ=b_φ,
                       b_r=b_r,
                       b_θ=b_θ,
                       X_dash=X_dash,
                       deriv_B_r=deriv_B_r,
                       deriv_B_φ=deriv_B_φ,
                       deriv_B_θ=deriv_B_θ,
                       deriv_ρ=deriv_ρ,
                       b=b,
                       c=c,
                       deriv_b_θ=deriv_b_θ,
                       deriv_b_φ=deriv_b_φ,
                       deriv_b_r=deriv_b_r,
                       deriv_η_O=deriv_η_O,
                       deriv_η_A=deriv_η_A,
                       deriv_η_H=deriv_η_H)
        Z_3 = Z_3_func(X=X,
                       v_r=v_r,
                       a_0=a_0,
                       B_θ=B_θ,
                       ρ=ρ,
                       η_O=η_O,
                       η_A=η_A,
                       b_φ=b_φ,
                       b=b,
                       c=c)
        Z_4 = Z_4_func(B_θ=B_θ, B_r=B_r, B_φ=B_φ, deriv_B_φ=deriv_B_φ, θ=θ)
        Z_5 = Z_5_func(η_O=η_O,
                       η_A=η_A,
                       η_H=η_H,
                       b_r=b_r,
                       b_θ=b_θ,
                       b_φ=b_φ,
                       C=C)
        Z_6 = Z_6_func(
            C=C,
            Z_3=Z_3,
            Z_4=Z_4,
            Z_5=Z_5,
            a_0=a_0,
            B_θ=B_θ,
            v_φ=v_φ,
            ρ=ρ,
        )
        Z_7 = Z_7_func(
            C=C,
            Z_1=Z_1,
            Z_2=Z_2,
            Z_3=Z_3,
            Z_4=Z_4,
            a_0=a_0,
            v_φ=v_φ,
            ρ=ρ,
        )

        dderiv_B_φ = dderiv_B_φ_func(
            B_φ=B_φ,
            B_θ=B_θ,
            η_O=η_O,
            η_H=η_H,
            η_A=η_A,
            θ=θ,
            v_r=v_r,
            v_φ=v_φ,
            deriv_B_r=deriv_B_r,
            deriv_B_θ=deriv_B_θ,
            deriv_B_φ=deriv_B_φ,
            deriv_η_O=deriv_η_O,
            deriv_η_A=deriv_η_A,
            deriv_η_H=deriv_η_H,
            A=A,
            C=C,
            b_r=b_r,
            b_θ=b_θ,
            b_φ=b_φ,
            Z_6=Z_6,
            Z_7=Z_7,
            deriv_b_θ=deriv_b_θ,
            deriv_b_φ=deriv_b_φ,
            deriv_b_r=deriv_b_r,
        )

        deriv_v_r = deriv_v_r_func(a_0=a_0,
                                   B_θ=B_θ,
                                   v_φ=v_φ,
                                   ρ=ρ,
                                   dderiv_B_φ=dderiv_B_φ,
                                   Z_1=Z_1,
                                   Z_2=Z_2,
                                   Z_3=Z_3,
                                   Z_4=Z_4)

        deriv_v_φ = deriv_v_φ_func(Z_2=Z_2, Z_3=Z_3, deriv_v_r=deriv_v_r)

        derivs[ODEIndex.B_r] = deriv_B_r
        derivs[ODEIndex.B_φ] = deriv_B_φ
        derivs[ODEIndex.B_θ] = deriv_B_θ
        derivs[ODEIndex.v_r] = deriv_v_r
        derivs[ODEIndex.v_φ] = deriv_v_φ
        derivs[ODEIndex.ρ] = deriv_ρ
        derivs[ODEIndex.B_φ_prime] = dderiv_B_φ
        derivs[ODEIndex.η_O] = deriv_η_O
        derivs[ODEIndex.η_A] = deriv_η_A
        derivs[ODEIndex.η_H] = deriv_η_H

        derivs[ODEIndex.v_θ] = 0
        if no_v_deriv:
            derivs[VELOCITY_INDEXES] = 0
        if __debug__:
            log.debug("θ: {}, {}", θ, degrees(θ))

        if store_internal:
            params_list.append(copy(params))
            derivs_list.append(copy(derivs))
            angles_list.append(θ)

            if len(params_list) != len(angles_list):
                log.error("Internal data not consistent, "
                          "params is {}, angles is {}".format(
                              len(params_list), len(angles_list)))

        return 0
Exemplo n.º 55
0
            temp[index] = f1[j + 200]
            index = index + 1
        m = m + 20
        n = n + 20
        with open('D:/study/Bioinformatics/QSP/200p_200n/10_fold/' + name +
                  '/test/test_' + name + '_' + str(k) + '.csv',
                  'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile)
            for row in temp:
                writer.writerow(row)
    m = 0
    n = 20
    k = 0
    for k in range(10):
        temp = np.copy(f1)
        temp = np.delete(temp,
                         list(range(m, n)) + list(range(m + 200, n + 200)),
                         axis=0)
        m = m + 20
        n = n + 20
        with open('D:/study/Bioinformatics/QSP/200p_200n/10_fold/' + name +
                  '/train/train_' + name + '_' + str(k) + '.csv',
                  'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile)
            for row in temp:
                writer.writerow(row)
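# Compact sketch of the fold bookkeeping above: for fold k, rows 20k..20k+19 of
# the positive block (0..199) and the matching rows of the negative block
# (200..399) form the test split, and np.delete removes them for the train split.
import numpy as np

data = np.arange(400 * 2).reshape(400, 2)    # stand-in for f1 (200 pos + 200 neg)
for k in range(10):
    pos = list(range(20 * k, 20 * (k + 1)))
    test_idx = pos + [i + 200 for i in pos]
    test = data[test_idx]
    train = np.delete(data, test_idx, axis=0)
    assert test.shape[0] == 40 and train.shape[0] == 360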
Exemplo n.º 56
0
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode

        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the fully-connected net, computing  #
        # the class scores for X and storing them in the scores variable.          #
        #                                                                          #
        # When using dropout, you'll need to pass self.dropout_param to each       #
        # dropout forward pass.                                                    #
        #                                                                          #
        # When using batch normalization, you'll need to pass self.bn_params[0] to #
        # the forward pass for the first batch normalization layer, pass           #
        # self.bn_params[1] to the forward pass for the second batch normalization #
        # layer, etc.                                                              #
        ############################################################################
        fc_cache = {}
        relu_cache = {}
        bn_cache = {}
        dropout_cache = {}
        batch_size = X.shape[0]

        cache_array=[]
        out_array=[]

        #X = np.reshape(X, [batch_size, -1])  # Flatten our input images.

        # Do as many Affine-Relu forward passes as required (num_layers - 1).
        # Apply batch norm and dropout as required.
        #print("forward path...")
        for i in range(self.num_layers - 1):
            #print("i+1=>", str(i+1))
            # fc_cache start from 1
            fc_act, fc_cache[str(i+1)] = affine_forward(X, self.params['W'+str(i+1)], self.params['b'+str(i+1)])

            if self.use_batchnorm:

                # bn_cache start from 1
                bn_act, bn_cache[str(i+1)] = batchnorm_forward(fc_act, self.params['gamma'+str(i+1)], self.params['beta'+str(i+1)], self.bn_params[i])

                # relu_cache start from 1
                relu_act, relu_cache[str(i+1)] = relu_forward(bn_act)
            else:            
                relu_act, relu_cache[str(i+1)] = relu_forward(fc_act)

            if self.use_dropout:
                # drop_act start from 1
                relu_act, dropout_cache[str(i+1)] = dropout_forward(relu_act, self.dropout_param)

            X = relu_act.copy() # Result of one pass through the affine-relu block.

            cache = (fc_cache[str(i+1)], relu_cache[str(i+1)])

            out_array.append(np.copy(relu_act))

            cache_array.append(np.copy(cache))

        # Final output layer is FC layer with no relu.
        scores, final_cache = affine_forward(X, self.params['W'+str(self.num_layers)], self.params['b'+str(self.num_layers)])
        #print("scores=>", scores)

        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # If test mode return early
        if mode == 'test':
            return scores

        loss, grads = 0.0, {}

        loss, dscores = softmax_loss(scores, y)
        loss += 0.5*self.reg*(np.sum(self.params['W'+str(self.num_layers)] ** 2))

        ############################################################################
        # TODO: Implement the backward pass for the fully-connected net. Store the #
        # loss in the loss variable and gradients in the grads dictionary. Compute #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        #                                                                          #
        # When using batch normalization, you don't need to regularize the scale   #
        # and shift parameters.                                                    #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################

        #print("backward...")
        # Backprop dsoft to the last FC layer to calculate gradients.
        dx_last, dw_last, db_last = affine_backward(dscores, final_cache)

        # Store gradients of the last FC layer
        grads['W'+str(self.num_layers)] = dw_last + self.reg*self.params['W'+str(self.num_layers)]
        grads['b'+str(self.num_layers)] = db_last
#        print("grads[W",str(self.num_layers), "].shape=>", grads['W'+ str(self.num_layers)].shape)
#        print("grads[b",str(self.num_layers), "].shape=>", grads['b'+ str(self.num_layers)].shape)

        for i in reversed(range(self.num_layers-1)):
#            print("i+1=>", i+1)

            #print("dx_last.shape=>", dx_last.shape)
            #print("relu_cache.shape=>", relu_cache[str(i+1)].shape)
            if self.use_dropout:
                dx_last = dropout_backward(dx_last, dropout_cache[str(i+1)])

            drelu = relu_backward(dx_last, relu_cache[str(i+1)])

            if self.use_batchnorm:
                dbatchnorm, dgamma, dbeta = batchnorm_backward(drelu, bn_cache[str(i+1)])
                dx_last, dw_last, db_last = affine_backward(dbatchnorm, fc_cache[str(i+1)])
                grads['beta' + str(i+1)] = dbeta
                grads['gamma' + str(i+1)] = dgamma
            else:
                dx_last, dw_last, db_last = affine_backward(drelu, fc_cache[str(i+1)])

            grads['W'+str(i+1)] = dw_last
            grads['W' + str(i+1)] += self.reg*self.params['W'+str(i+1)]
            grads['b' + str(i+1)] = db_last

#            print("grads[W",str(i+1), "].shape=>", grads['W'+ str(i+1)].shape)
#            print("grads[b",str(i+1), "].shape=>", grads['b'+ str(i+1)].shape)

            # Add reg. loss for each other FC layer.
            loss += 0.5 * self.reg * (np.sum(self.params['W' + str(i+1)]**2))

        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        return loss, grads
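# The affine_forward/affine_backward pair used above follows the usual cs231n
# convention: cache the inputs on the way forward, consume the cache on the way
# back. A minimal sketch (not the course's reference implementation):
import numpy as np

def affine_forward_sketch(x, w, b):
    out = x.reshape(x.shape[0], -1) @ w + b     # rows are flattened examples
    return out, (x, w, b)

def affine_backward_sketch(dout, cache):
    x, w, b = cache
    dx = (dout @ w.T).reshape(x.shape)          # gradient w.r.t. the input
    dw = x.reshape(x.shape[0], -1).T @ dout     # gradient w.r.t. the weights
    db = dout.sum(axis=0)                       # gradient w.r.t. the bias
    return dx, dw, db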
Exemplo n.º 57
0
def generate_left_words_from_image(list_of_files, path_to_images, path_to_anots, padded=True):
    '''
    This function just copies the rectangular words
    from the map images
    save in a directory
    '''
    # A is a dictionary of dictionaries
    print(padded)
    A = {}
    for i in range(len(list_of_files)):
        _dict = np.load(path_to_anots+str(list_of_files[i])+'.npy').item()
        for j in list(_dict.keys()):
            if len(_dict[j]['vertices']) != 4:
                del _dict[j]
        A[i] = _dict

    # dictionary_of_indices is dict of indices of each dicts 
    dictionary_of_indices = {}
    for i in range(len(A)):
        dictionary_of_indices[i] = list(A[i].keys())  # list, so entries can be removed later

    # read the images in a dic too
    I = {}
    for i in range(len(list_of_files)):
        I[i] = mpimg.imread(path_to_images+str(list_of_files[i])+'.tiff')

    list_of_words = []
    y = []
    image_files = []
    for count in range(0,10000):
        print('image %d' % count)
        # randomly pick a file and a rectangle
        file_ID = np.random.randint(len(A))
        loops = 0
        while len(dictionary_of_indices[file_ID]) <= 1:
            file_ID = np.random.randint(len(A))
            loops = loops+1
            if loops == 20:
                return list_of_words, y, image_files
        anots_ID = np.random.choice(dictionary_of_indices[file_ID])

        print(file_ID, anots_ID)
        # remove the rectangle from the available keys
        dictionary_of_indices[file_ID].remove(anots_ID)
    
        # now get the information from the dictionary
        image_from_map_info = A[file_ID][anots_ID]
    
        # fulcrum or pivot for map's rotation
        fulcrum = list(map(int, image_from_map_info['vertices'][0]))
        x2 = image_from_map_info['vertices'][1]
        x4 = image_from_map_info['vertices'][3]
        width = int(distance(fulcrum, x2))
        height = int(distance(fulcrum, x4))
        _angle = orientation(fulcrum, x2)
    
        I_cache = np.copy(I[file_ID])
        I_cache, fulcrum = get_crop(I_cache, image_from_map_info['vertices'], fulcrum)
        #get the final crop
        extracted_crop = rotateImage(I_cache, _angle, fulcrum, height, width)

        # get padded image
        if padded:
            final_img = pad_image(extracted_crop)
        else:
            final_img = cv2.resize(extracted_crop, dsize=(487, 135), interpolation=cv2.INTER_CUBIC)

        true_label = 1#np.random.randint(0,1)
        if true_label == 0:
            anots_ID = np.random.choice(dictionary_of_indices[file_ID])
            label = A[file_ID][anots_ID]['name']
        else:
            label = image_from_map_info['name']
    
        list_of_words.append(label)
        y.append(true_label)
        
        # save the image
        image_files.append(final_img)
        #print(list_of_words[count])
        #plt.imshow(image_files[count])
        #plt.show()
        #filenames.append(save_dir+str(count))
        #cv2.imwrite(save_dir+str(count)+'.png', final_img)

    return list_of_words, y, image_files
Exemplo n.º 58
0
def extract_features(imgs,
                     color_space='RGB',
                     spatial_size=(32, 32),
                     hist_bins=32,
                     orient=9,
                     pix_per_cell=8,
                     cell_per_block=2,
                     hog_channel=0,
                     spatial_feat=True,
                     hist_feat=True,
                     hog_feat=True):
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        file_features = []
        # Read in each one by one
        image = mpimg.imread(file)
        # apply color conversion if other than 'RGB'
        if color_space != 'RGB':
            if color_space == 'HSV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            elif color_space == 'LUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            elif color_space == 'HLS':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            elif color_space == 'YUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            elif color_space == 'YCrCb':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
            else:
                # unknown color space: fall back to a copy of the RGB image
                # so feature_image is always defined
                feature_image = np.copy(image)
        else:
            feature_image = np.copy(image)

        if spatial_feat:
            spatial_features = bin_spatial(feature_image, size=spatial_size)
            file_features.append(spatial_features)
        if hist_feat:
            # Apply color_hist()
            hist_features = color_hist(feature_image, nbins=hist_bins)
            file_features.append(hist_features)
        if hog_feat:
            # Call get_hog_features() with vis=False, feature_vec=True
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(
                        get_hog_features(feature_image[:, :, channel],
                                         orient,
                                         pix_per_cell,
                                         cell_per_block,
                                         vis=False,
                                         feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:, :,
                                                              hog_channel],
                                                orient,
                                                pix_per_cell,
                                                cell_per_block,
                                                vis=False,
                                                feature_vec=True)
            # Append the new feature vector to the features list
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    # Return list of feature vectors
    return features
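
A short usage sketch, assuming the bin_spatial, color_hist, and get_hog_features helpers referenced above are defined in the same module, and that training images live under a hypothetical 'vehicles/' directory:

import glob

car_files = glob.glob('vehicles/**/*.png', recursive=True)  # hypothetical path
car_features = extract_features(car_files,
                                color_space='YCrCb',
                                hog_channel='ALL')
# each entry is one flat vector: spatial, histogram, and HOG features concatenated
print(len(car_features), len(car_features[0]))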
def kMedoids(D, k, tmax=100):
    # determine dimensions of distance matrix D
    m, n = D.shape

    if k > n:
        raise Exception('too many medoids')

    # find a set of valid initial cluster medoid indices since we
    # can't seed different clusters with two points at the same location
    valid_medoid_inds = set(range(n))
    invalid_medoid_inds = set([])
    rs, cs = np.where(D == 0)  # rs, cs: row and column indices where the distance is 0
    # the rows, cols must be shuffled because we will keep the first duplicate below
    index_shuf = list(range(len(rs)))
    np.random.shuffle(index_shuf)
    rs = rs[index_shuf]
    cs = cs[index_shuf]
    for r,c in zip(rs,cs):
        # if there are two points with a distance of 0...
        # keep the first one for cluster init
        if r < c and r not in invalid_medoid_inds:
            invalid_medoid_inds.add(c)
    valid_medoid_inds = list(valid_medoid_inds - invalid_medoid_inds)

    if k > len(valid_medoid_inds):
        raise Exception('too many medoids (after removing {} duplicate points)'.format(
            len(invalid_medoid_inds)))

    # randomly initialize an array of k medoid indices
    M = np.array(valid_medoid_inds)
    np.random.shuffle(M)
    M = np.sort(M[:k])

    # create a copy of the array of medoid indices
    Mnew = np.copy(M)

    # initialize a dictionary to represent clusters
    C = {}
    for t in range(tmax):
        # determine clusters, i.e. arrays of data indices
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
        # update cluster medoids
        for kappa in range(k):
            J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
            j = np.argmin(J)
            Mnew[kappa] = C[kappa][j]
        # np.sort returns a sorted copy, so the result must be assigned back
        Mnew = np.sort(Mnew)
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # this for/else branch runs only when tmax is exhausted without
        # convergence: do a final update of cluster memberships
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]

    # return results
    return M, C
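
A quick self-contained usage sketch: build a pairwise distance matrix with scipy and cluster into k = 2 medoids (the data and k here are illustrative):

import numpy as np
from scipy.spatial.distance import pdist, squareform

points = np.random.rand(100, 2)   # 100 random 2-D points
D = squareform(pdist(points))     # symmetric pairwise distance matrix
medoids, clusters = kMedoids(D, k=2)
for kappa in clusters:
    print('medoid', medoids[kappa], 'has', len(clusters[kappa]), 'members')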
Example #60
def jPCA(data, times):
    #PCA
    from sklearn.decomposition import PCA
    n = 6
    pca = PCA(n_components=n)
    zpca = pca.fit_transform(data)
    pc = zpca[:, 0:2]
    eigen = pca.components_
    phi = np.mod(np.arctan2(zpca[:, 1], zpca[:, 0]), 2 * np.pi)
    #jPCA
    X = pca.components_.transpose()
    dX = np.hstack(
        [np.vstack(derivative(times, X[:, i])) for i in range(X.shape[1])])
    # build the H mapping for a given n: H maps the n*(n-1)/2 free
    # parameters of an n x n skew-symmetric matrix to its flattened
    # n*n form (the code works row-wise, but H is built column-based)
    n = X.shape[1]
    H = buildHMap(n)
    # X tilde
    Xtilde = np.zeros((X.shape[0] * X.shape[1], X.shape[1] * X.shape[1]))
    for i, j in zip((np.arange(0, n**2, n)),
                    np.arange(0, n * X.shape[0], X.shape[0])):
        Xtilde[j:j + X.shape[0], i:i + X.shape[1]] = X
    # put dx in columns
    dXv = np.vstack(dX.transpose().reshape(X.shape[0] * X.shape[1]))
    # multiply Xtilde by H
    XtH = np.dot(Xtilde, H)
    # solve XtH k = dXv
    k, residuals, rank, s = np.linalg.lstsq(XtH, dXv, rcond=None)
    # multiply by H to get m then M
    m = np.dot(H, k)
    Mskew = m.reshape(n, n).transpose()
    # Construct the two vectors for projection with Mskew
    evalues, evectors = np.linalg.eig(Mskew)
    # index = np.argsort(evalues).reshape(5,2)[:,1]
    index = np.argsort(
        np.array([np.linalg.norm(i) for i in evalues]).reshape(int(n / 2),
                                                               2)[:, 0])
    evectors = evectors.transpose().reshape(int(n / 2), 2, n)
    u = np.vstack([
        np.real(evectors[index[-1]][0] + evectors[index[-1]][1]),
        np.imag(evectors[index[-1]][0] - evectors[index[-1]][1])
    ]).transpose()
    # Project X
    rX = np.dot(X, u)
    rX = rX * -1.0
    score = np.dot(data, rX)
    phi2 = np.mod(np.arctan2(score[:, 1], score[:, 0]), 2 * np.pi)
    # Construct the two vectors for projection with Msym: flipping the sign
    # of the upper triangle of a skew-symmetric matrix (whose diagonal is
    # zero) yields its symmetric counterpart
    Msym = np.copy(Mskew)
    Msym[np.triu_indices(n)] *= -1.0
    evalues2, evectors2 = np.linalg.eig(Msym)
    v = evectors2[:, 0:2]
    rY = np.dot(X, v)
    score2 = np.dot(data, rY)
    phi3 = np.mod(np.arctan2(score2[:, 1], score2[:, 0]), 2 * np.pi)
    dynamical_system = {
        'x': X,
        'dx': dX,
        'Mskew': Mskew,
        'Msym': Msym,
    }
    return (rX, phi2, dynamical_system)
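
buildHMap is referenced above but not defined in this snippet. A minimal sketch consistent with how it is used (m = np.dot(H, k) is reshaped into an n x n skew-symmetric matrix), with the exact pair ordering an assumption:

import numpy as np

def buildHMap(n):
    # map the n*(n-1)/2 free parameters of an n x n skew-symmetric matrix
    # to its flattened n*n form: one +1/-1 pair per index pair (i, j), i < j
    num_params = n * (n - 1) // 2
    H = np.zeros((n * n, num_params))
    col = 0
    for i in range(n):
        for j in range(i + 1, n):
            H[i * n + j, col] = 1.0   # entry (i, j) of the reshaped matrix
            H[j * n + i, col] = -1.0  # entry (j, i): skew-symmetry
            col += 1
    return H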