def dense_image_warp(image, flow):
    # batch_size, height, width, channels = (array_ops.shape(image)[0],
    #                                        array_ops.shape(image)[1],
    #                                        array_ops.shape(image)[2],
    #                                        array_ops.shape(image)[3])
    batch_size, height, width, channels = (np.shape(image)[0],
                                           np.shape(image)[1],
                                           np.shape(image)[2],
                                           np.shape(image)[3])

    # The flow is defined on the image grid. Turn the flow into a list of query
    # points in the grid space.
    # grid_x, grid_y = array_ops.meshgrid(
    #     math_ops.range(width), math_ops.range(height))
    # stacked_grid = math_ops.cast(
    #     array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)
    # batched_grid = array_ops.expand_dims(stacked_grid, axis=0)
    # query_points_on_grid = batched_grid - flow
    # query_points_flattened = array_ops.reshape(query_points_on_grid,
    #                                            [batch_size, height * width, 2])
    grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))
    stacked_grid = np.stack([grid_y, grid_x], axis=2).astype(flow.dtype)
    batched_grid = np.expand_dims(stacked_grid, axis=0)
    query_points_on_grid = batched_grid - flow
    query_points_flattened = np.reshape(query_points_on_grid,
                                        [batch_size, height * width, 2])
    # Compute values at the query points, then reshape the result back to the
    # image grid.
    interpolated = interp2d(image, query_points_flattened)
    interpolated = np.reshape(interpolated,
                              [batch_size, height, width, channels])
    return interpolated
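The `interp2d` used above is not defined in the snippet (and is not SciPy's
`scipy.interpolate.interp2d`, whose signature differs). A minimal sketch of a
batched bilinear sampler with the assumed semantics:

import numpy as np

def interp2d(image, query_points):
    """image: [B, H, W, C]; query_points: [B, N, 2] in (y, x) order."""
    batch, height, width, channels = image.shape
    out = np.zeros((batch, query_points.shape[1], channels), dtype=float)
    for b in range(batch):
        y = np.clip(query_points[b, :, 0], 0, height - 1)
        x = np.clip(query_points[b, :, 1], 0, width - 1)
        y0, x0 = np.floor(y).astype(int), np.floor(x).astype(int)
        y1, x1 = np.minimum(y0 + 1, height - 1), np.minimum(x0 + 1, width - 1)
        wy, wx = (y - y0)[:, None], (x - x0)[:, None]
        out[b] = (image[b, y0, x0] * (1 - wy) * (1 - wx) +
                  image[b, y1, x0] * wy * (1 - wx) +
                  image[b, y0, x1] * (1 - wy) * wx +
                  image[b, y1, x1] * wy * wx)
    return out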
Example n. 2
File: e032.py Project: ccdunn/euler
def solve(N):

    digits = np.arange(1, N + 1, dtype=int)
    digits1 = digits[np.newaxis, :]
    digits2 = digits[:, np.newaxis]
    digits3 = N - digits1 - digits2

    # a d1-digit times a d2-digit number has d1+d2-1 or d1+d2 digits
    lb = digits1 + digits2 - 1
    ub = digits1 + digits2
    # the original np.logical_and call was missing its operands
    valid_digits = np.where(np.logical_and(digits3 >= lb, digits3 <= ub))

    pans = np.array([], dtype=int)
    # neither ones place of multiplicands can be a 1
    # odd times a 5 not allowed, nor is even times a 6
    for ones_1 in np.arange(3, 10, dtype=int):
        for ones_2 in np.arange(2, ones_1, dtype=int):
            # if (ones_1 == 5 and ones_2 % 2) or (ones_2 == 5 and ones_1 % 2):
            #     continue
            # if ones_1 % 2 and ones_2 % 2:
            #     continue
            ones_prod = (ones_1 * ones_2) % 10
            if ones_prod == ones_1 or ones_prod == ones_2:
                continue
            # six digits left to distribute
            # at most 4 digits per multiplicand
            # must be at least as many digits in product as in largest multiplicand
            # if one multiplicand is one digit, other must be 4 digits
            # A =
            # if one multiplicand is 2 digits, other must 3 digits
            # if one multiplicand is 3 digits, other must be at least 2 digits, at most 3 digits
            # if one multiplicand is 4 digits, other must be 1 digit

    return
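For reference, a brute-force cross-check of the pandigital-products question
(Project Euler 32) that this stub works toward; this helper is not part of the
original file:

def pandigital_products():
    prods = set()
    for a in range(1, 100):
        for b in range(a, 10000 // a + 1):
            s = "%d%d%d" % (a, b, a * b)
            if len(s) == 9 and set(s) == set("123456789"):
                prods.add(a * b)
    return sum(prods)  # sum of distinct products writable as pandigital a * b = c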
Example n. 3
    def merge(self, another_system):
        self.box_x = max(self.box_x, another_system.box_x)
        self.box_y = max(self.box_y, another_system.box_y)
        self.box_z = max(self.box_z, another_system.box_z) + 1.3

        mols = deepcopy(another_system.molecules)
        diff_z = (self.box_z - 0.4) - self.molecules[0].atoms[0].z
        iterations = int(sys.argv[3])
        if sys.argv[4] == "1":
            source = deepcopy(self.molecules)
            self.molecules = []
        else:
            source = deepcopy(mols)
        base_y = self.box_y * 0.7
        base_x = self.box_x * 0.7  # was self.box_z, presumably a copy-paste slip
        step = iterations**(1. / 2)
        base_list_y = np.arange(0, base_y, base_y / step)  # built-in range() rejects floats
        base_list_x = np.arange(0, base_x, base_x / step)
        for diff_y, diff_x in list(product(base_list_y,
                                           base_list_x))[:iterations]:
            #diff_y = random.random() * self.box_y * 0.7
            #diff_z = random.random() * self.box_z  * 0.7

            mult = deepcopy(source)
            for mol in mult:
                for atom in mol.atoms:
                    atom.x += diff_x
                    atom.y += diff_y
                    atom.z += diff_z
            self.molecules += mult

        self.molecules += deepcopy(mols)
Example n. 5
 def __mul__(self, value):
     temp_lst = [None] * (len(self) * value)  # range() objects do not support item assignment
     x = 0
     for i in range(value):
         for j in self:
             temp_lst[x] = j
             x += 1
     return list_2(temp_lst)
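A usage sketch (`list_2` is the list wrapper this method belongs to, assumed
constructible from an iterable):

xs = list_2([1, 2, 3])
print(xs * 2)  # repeats the contents: [1, 2, 3, 1, 2, 3]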
def precook(s, n=4, out=False):
    """Takes a string as input and returns an object that can be given to
    either cook_refs or cook_test. This is optional: cook_refs and cook_test
    can take string arguments as well."""
    words = s.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            ngram = tuple(words[i:i+k])
            counts[ngram] += 1
    return (len(words), counts)
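A quick usage sketch (precook needs `from collections import defaultdict` in
scope):

length, counts = precook("the cat sat on the mat", n=2)
print(length)                  # 6
print(counts[("the",)])        # 2
print(counts[("the", "cat")])  # 1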
Example n. 7
    def channels_last_to_channels_first(weights, n_pre_rows, n_pre_cols,
                                        n_pre_channs):
        new_w = np.zeros_like(weights)
        rows = np.repeat(np.arange(n_pre_rows), n_pre_cols)
        cols = np.tile(np.arange(n_pre_cols), n_pre_rows)
        n_per_ch = n_pre_cols * n_pre_rows
        for ch in range(n_pre_channs):  # n_channs / n_cols were undefined
            lrows = (rows * n_pre_cols * n_pre_channs + cols * n_pre_channs + ch)
            sr = ch * n_per_ch
            er = sr + n_per_ch
            new_w[sr:er, :] = weights[lrows, :]

        return new_w
Example n. 8
    def _get_anchors(self, feature_height, feature_width, level):

        rx = np.arange(feature_width, dtype=float) + 0.5   # np.float was removed from numpy
        ry = np.arange(feature_height, dtype=float) + 0.5
        centers = np.stack(np.meshgrid(rx, ry),
                           axis=-1) * self._strides[level - 3]
        centers = np.expand_dims(centers, axis=-2)
        centers = np.tile(centers, [1, 1, self._num_anchors, 1])
        dims = np.tile(self._anchor_dims[level - 3],
                       [feature_height, feature_width, 1, 1])
        anchors = np.concatenate([centers, dims], axis=-1)
        return np.reshape(
            anchors, [feature_height * feature_width * self._num_anchors, 4])
Example n. 9
 def insert(self, index, object):
     temp_lst = list(range(len(self) + 1))  # `array` was undefined; a list also allows arbitrary values
     temp_bool = False
     for x in range(len(self)):
         if x == index:
             temp_lst[x] = object
             temp_lst[x + 1] = self[x]
             temp_bool = True
         elif temp_bool:
             temp_lst[x + 1] = self[x]
         else:
             temp_lst[x] = self[x]
     self._lst = temp_lst
Example n. 10
 def pick_index(length, counts, is_temporal_segment=True):
     if is_temporal_segment:
         k = random.random()
         if k < 0.5:  # `k < 0` can never be true for random.random()
             index = np.random.randint(0, length - counts)
             return np.arange(index, index + counts)
         else:
             g = np.array_split(np.arange(length), counts)
             index_list = []
             for i in g:
                 index_list.append(np.random.choice(i))
             return index_list
     else:
         index = np.random.randint(0, length - counts)
         return np.arange(index, index + counts)
Example n. 11
def get_graph_nbrhd_embd_text(train_graph, ent, max_text_nbrs):
  """Helper to get neighbor text relations from embedded data."""
  neighborhood = []
  neighborhood_emb = []
  for nbr in train_graph.kg_text_data[ent]:
    for sid in train_graph.kg_text_data[ent][nbr]:
      neighborhood.append(nbr)
      eid = train_graph.emb_id_map[sid]
      neighborhood_emb.append(train_graph.embeddings[eid])
  if not neighborhood:
    neighborhood = [[]]
    neighborhood_emb = [np.zeros(train_graph.embeddings[0].size)]
  neighborhood = np.array(neighborhood, dtype=int)  # np.int was removed from numpy
  neighborhood_emb = np.array(neighborhood_emb, dtype=np.float32)
  if neighborhood.shape[0] > max_text_nbrs:
    ids = np.random.choice(np.arange(neighborhood.shape[0]),
                           size=max_text_nbrs, replace=False)
    neighborhood = neighborhood[ids]
    neighborhood_emb = neighborhood_emb[ids]
  else:
    neighborhood = sample_or_pad(neighborhood, max_text_nbrs,
                                 pad_value=train_graph.ent_pad)
    neighborhood_emb = sample_or_pad(neighborhood_emb, max_text_nbrs,
                                     pad_value=0)

  return neighborhood, neighborhood_emb
Example n. 12
def makepopii(snapshot,mvir,vmax,rvir,mvir_prog,coldgas,hotgas,blowout,mstar):

#make the timesteps
    print(" snapshot: %3.2e" % snapshot)
    print("     mvir: %3.2e" % mvir)
    print("mvir_prog: %3.2e" % mvir_prog)
    print("     vmax: %3.2f" % vmax)
    print("     rvir: %3.2f" % rvir)
    print("  coldgas: %3.2e" % coldgas)
    print("   hotgas: %3.2e" % hotgas)
    print("  blowout: %3.2e" % blowout)
    print("    mstar: %3.2e" % mstar)

    nsteps = 50  # must be an int for range() below
    timestep_mini = np.arange(nsteps) * (t_current - t_prev) / nsteps + t_prev

#initialise some variables to return here
    coldgas_now = 0.
    hotgas_now = mvir*bac.fb
    mstar_now = 0.
    blowout_now = 0.
    macc_now = 0.

#check the DM mass difference for Macc
    delta_mdm = mvir - mvir_prog
    if delta_mdm > 0:

        for ti in range(0,nsteps):
            mstar_now += (hotgas_now/tdyn*timestep_mini[ti])

    return "return your output fields here"
Example n. 13
def draw_circle(h, v, r):
    x = []
    y = []
    for i in range(0, 360):
        x.append(h + r * cos(radians(i)))
        y.append(v + r * sin(radians(i)))
    return (x, y)
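A usage sketch (draw_circle assumes `from math import cos, sin, radians` in
scope):

import matplotlib.pyplot as plt
x, y = draw_circle(0.0, 0.0, 2.0)  # circle of radius 2 at the origin
plt.plot(x, y)
plt.axis('equal')
plt.show()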
Example n. 14
    def stackTimeSeries(self, filterKernal):
        """
        TODO
        Input:
        filterKernal = a numpy array consisting of filter kernal
        e.g Hanning, Kaiser, etc...
        """
        if filterKernal.size > 3:
            bkernal = np.ones((3, 1))  # create basic 3 T kernel
            bkernal[1] = -2.0
            bsgn = np.ones((1, filterKernal.size))
            bsgn[0, 0::2] = bsgn[0, 0::2] * -1.0  # linear drift removal
            bwt = np.zeros((3, filterKernal.size))
            bwt = np.matmul(bkernal,
                            bsgn * filterKernal)  # creates the stack kernel

            # map and sum the weighted kernel of bwt
            k0 = np.arange(filterKernal.size * 3) + 1
            tmp1 = np.arange(3) + 1
            tmp2 = np.ones(filterKernal.size)
            tmp3 = np.ones((3, 1))
            tmp4 = np.arange(filterKernal.size)
            # outer products give the intended (3, size) index map;
            # np.matmul on these 1-D/2-D shapes would raise
            knew = (np.outer(tmp1, tmp2) +
                    np.outer(tmp3.ravel(), tmp4 * (filterKernal.size + 3)))
Example n. 15
    def random_choose_abc(self):
        i = tf.range(self.NP)
        a = tf.random.uniform([self.NP],
                              maxval=self.NP - 1,
                              dtype=tf.dtypes.int32)
        b = tf.random.uniform([self.NP],
                              maxval=self.NP - 2,
                              dtype=tf.dtypes.int32)
        c = tf.random.uniform([self.NP],
                              maxval=self.NP - 3,
                              dtype=tf.dtypes.int32)

        a += tf.cast(a >= i, tf.dtypes.int32)

        ia = tf.sort([i, a], 0)

        for last in ia:
            b += tf.cast(b >= last, tf.dtypes.int32)

        iab = tf.sort([i, a, b], 0)

        for last in iab:
            c += tf.cast(c >= last, tf.dtypes.int32)

        return a, b, c
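The `+= tf.cast(...)` shifts above are a rejection-free way to sample indices
distinct from i (and from each other). The same trick in plain numpy, for
intuition:

import numpy as np
n, i = 10, 4
r = np.random.randint(0, n - 1, size=100000)  # r in [0, n-2]
r = r + (r >= i)                              # shift past the excluded index
assert i not in r and r.max() < n             # uniform over the other n-1 values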
    def loss(self, X_batch, y_batch, reg):
        """
    Compute the loss function and its derivative. 
    Subclasses will override this.

    Inputs:
    - X_batch: A numpy array of shape (N, D) containing a minibatch of N
      data points; each point has dimension D.
    - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
    - reg: (float) regularization strength.

    Returns: A tuple containing:
    - loss as a single float
    - gradient with respect to self.W; an array of the same shape as W
    """
        loss = 0.0
        train_num = X_batch.shape[0]
        score = np.dot(X_batch, self.W)
        real_score = np.reshape(score[np.arange(train_num), y_batch],
                                (train_num, -1))
        score = score - real_score + 1.0
        score[np.arange(train_num), y_batch] = 0.0  # `y` was undefined; the label arg is y_batch
        score[score < 0] = 0.0
        loss += np.sum(score) / train_num
        loss += reg * np.sum(self.W * self.W)  # `W` was undefined; the weights live on self

        dW = np.zeros(self.W.shape)
        score_mask = np.zeros(score.shape)
        score_mask[score > 0] = 1
        y_sum = np.sum(score_mask, axis=1)
        score_mask[np.arange(train_num), y_batch] -= y_sum
        dW = np.dot(X_batch.T, score_mask) / train_num
        dW += 2 * reg * self.W  # gradient of reg * sum(W * W); `dW += dW * reg` was a bug

        return loss, dW
Example n. 17
    def train_network(self, num_epochs):
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            for epoch_number in range(num_epochs):

                # get current batch
                batch_x, batch_y = generate_batch()

                current_prediction = forward_all_timesteps(self.xts)

                ## the RMSE loss function (note: creating these ops inside the
                ## training loop regrows the graph every epoch; they belong
                ## outside it)
                losses = tf.sqrt(
                    tf.reduce_mean(
                        tf.square(tf.subtract(current_prediction, self.yts))))
                total_loss = tf.reduce_mean(losses)
                train_step = tf.train.AdagradOptimizer(
                    self.learning_rate).minimize(losses)

                sess.run([train_step],
                         feed_dict={
                             self.xts: batch_x,
                             self.yts: batch_y
                         })

                current_loss_epoch = sess.run([total_loss],
                                              feed_dict={
                                                  self.xts: batch_x,
                                                  self.yts: batch_y
                                              })

                printare = ('at epoch ' + str(epoch_number) +
                            ' we have loss value : ' + str(current_loss_epoch))
                print(printare)
Example n. 18
def softmax_loss_naive(W, X, y, reg):

    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)    # a matrix with the same shape as W
    dW_each = np.zeros_like(W)
    num_train, dim = X.shape
    num_class = W.shape[1]
    f = X.dot(W)    # N by C
    # Considering the Numeric Stability
    f_max = np.reshape(np.max(f, axis=1), (num_train, 1))   # subtract the row max for numerical stability
    prob = np.exp(f - f_max) / np.sum(np.exp(f - f_max), axis=1, keepdims=True) # N by C
    y_trueClass = np.zeros_like(prob)
    y_trueClass[np.arange(num_train), y] = 1.0
    for i in range(num_train):
        for j in range(num_class):
            loss += -(y_trueClass[i, j] * np.log(prob[i, j]))
            dW_each[:, j] = -(y_trueClass[i, j] - prob[i, j]) * X[i, :]
        dW += dW_each
    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)  # add regularization
    dW /= num_train
    dW += reg * W

    return loss, dW
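A tiny smoke test with random data (shapes follow the convention above: W is
(D, C), X is (N, D); the numbers here are illustrative only):

np.random.seed(0)
W = 0.001 * np.random.randn(4, 3)
X = np.random.randn(5, 4)
y = np.random.randint(3, size=5)
loss, dW = softmax_loss_naive(W, X, y, reg=0.1)
print(loss, dW.shape)  # loss near log(3) for near-zero weights; dW has W's shape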
Example n. 19
def compute_saliency_maps(X, y, model):
    """
  Compute a class saliency map using the model for images X and labels y.
  
  Input:
  - X: Input images, of shape (N, 3, H, W)
  - y: Labels for X, of shape (N,)
  - model: A PretrainedCNN that will be used to compute the saliency map.
  
  Returns:
  - saliency: An array of shape (N, H, W) giving the saliency maps for the input
    images.
  """
    saliency = None
    ##############################################################################
    # TODO: Implement this function. You should use the forward and backward     #
    # methods of the PretrainedCNN class, and compute gradients with respect to  #
    # the unnormalized class score of the ground-truth classes in y.             #
    ##############################################################################
    N, C, H, W = X.shape
    saliency = np.zeros((N, H, W))

    scores, cache = model.forward(X, mode='test')  #(n,c)

    # set dscores to [0, 0, ..., 1, ..., 0], then backprop to the image (lecture 9, slide 32)
    dscores = np.zeros_like(scores)
    dscores[np.arange(N), y] = 1.0
    dX, grads = model.backward(dscores, cache)
    saliency = np.max(np.abs(dX), axis=1)
    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################
    return saliency
def batch_iter(corpus, label, batch_size, epochs, shuffle):
    """
    # TODO
    specific task data and label are chosen randomly, \
    each batch of data come from the same task, but different sentiment polarity
    """
    start_indices = [0] * len(params["task"])
    epoch_count = [0] * len(params["task"])
    data = []
    for i in range(len(params["task"])):
        array = np.array(list(zip(corpus[i], label[i])))        
        data.append(array)
    while True:
        if _end_batch_iter(epoch_count, epochs):
            break
        else:
            while True:
                task = random.randint(0, len(params["task"]) - 1)
                if epoch_count[task] != epochs:
                    break
            data_size = len(data[task])
            start_index = start_indices[task]
            end_index = min(start_index + batch_size, data_size)
            start_indices[task] = end_index if end_index != data_size else 0
            epoch_count[task] += (0 if end_index != data_size else 1)

            if end_index == data_size and epoch_count[task] < epochs and shuffle:
                shuffled_indices = np.random.permutation(
                    np.arange(data_size))  # TODO need to be tested
                data[task] = data[task][shuffled_indices]
                epoch_count[task] += 1
            elif end_index == data_size:
                epoch_count[task] += 1

            yield task, data[task][start_index: end_index]
Example n. 21
    def Series_Cat(self, name_lists, run_dic={}, mode='processed'):
        '''
        Concatenate series together; very useful for cut series.

        Parameters
        ----------
        name_lists : (list)
            List of cells you want to plot. Only common cells will be plotted.
        run_dic : (dict), optional
            Dictionary of runs to concatenate. Key is the run name; the value is a
            2-tuple of start & end time (s). The default is {}.
        mode : ('processed' or 'raw'), optional
            Mode of series. The default is 'processed'.

        Returns
        -------
        catted_series : (pd.DataFrame)
            Concatenated series in the given order.
        '''
        run_num = len(run_dic.keys())
        run_lists = list(run_dic.keys())
        common_cells = self.Find_Common_Cells(name_lists, run_lists)
        catted_series = pd.DataFrame(index=common_cells)
        for i in range(run_num):
            c_runname = run_lists[i]
            c_frame = self.Get_Spon_Train(run_lists[i], run_dic[c_runname][0],
                                          run_dic[c_runname][1],
                                          mode).loc[common_cells]
            catted_series = pd.concat([catted_series, c_frame], axis=1)
        catted_series.columns = np.arange(len(catted_series.columns))
        return catted_series
Example n. 22
def position_encoding(current, min_rate=.0001):
  """Add original Transformer positional encodings,

  Args:
    current:  [batch_size, seq_length, features] sequence
    min_rate:

  Returns:
    sequence w/ positional encodings concatenated.
  """
  seq_length = current.shape[1].value
  features = current.shape[2].value

  assert(features % 2 == 0)

  # compute angle rates
  angle_rate_exponents = np.linspace(0, 1, features//2)
  angle_rates = min_rate**angle_rate_exponents

  # compute angle radians
  positions = np.arange(seq_length)
  angle_rads = positions[:, np.newaxis] * angle_rates[np.newaxis, :]

  # sines and cosines
  sines = np.sin(angle_rads)
  cosines = np.cos(angle_rads)
  pos_encode = np.concatenate([sines, cosines], axis=-1)

  # concatenate onto the features, broadcasting over the batch (the original
  # returned `current` unchanged, contradicting the docstring)
  pos_encode = tf.constant(pos_encode[np.newaxis], dtype=current.dtype)
  pos_encode = tf.tile(pos_encode, [tf.shape(current)[0], 1, 1])
  return tf.concat([current, pos_encode], axis=-1)
Example n. 23
def get_new_seq(rot_triplet,ntot):
#  rot_triplet is size n x 3
    #print('rot_triplet ', len(rot_triplet))
    rot_triplet=np.array(rot_triplet,dtype=int)
    if (rot_triplet.size==0):
        nRotTriplets=0
        nb=0
        #return np.array([]),0,0;
    else:
        nRotTriplets, nb = rot_triplet.shape

    #print(nRotTriplets,nb,rot_triplet.flatten())

    if (nb != 3 and nRotTriplets != 0 and ntot != 0):
        print('**ERROR: the number of column vectors in the rotating triplet must equal 3, the number of blades')
        new_seq = np.arange(1, ntot)
    else:
        non_rotating = np.ones(ntot, dtype=int)
        #print(non_rotating)
        non_rotating[rot_triplet.flatten()] = 0  # if they are rotating, set them false
        a = np.array(np.nonzero(non_rotating)).flatten()
        b = (rot_triplet.reshape(nRotTriplets*nb, 1)).flatten()
        new_seq = np.concatenate((a, b))

        #print(new_seq)
    return new_seq,nRotTriplets,nb
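A worked example: with six DOFs and one rotating triplet on DOFs 1, 3, 5, the
non-rotating DOFs come first, followed by the triplet:

new_seq, nRot, nb = get_new_seq([[1, 3, 5]], 6)
print(new_seq)  # [0 2 4 1 3 5]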
Example n. 24
    def invert_L2_svd():
        """ SBAS time series inversion using L2 norm SVD
        reference: Berardino et. al. 2002 IEEE Transaction on Geoscience and Remote Sensing"""
        print('Starting SVD inversion')

        pix2avevel = np.full(ts.size, np.nan)  # np.nans does not exist
        pix2cumdef = np.full(ts.size, np.nan)

        for i in range(ts.WIDTH):
            print('column {0}'.format(i))
            pix2date = np.zeros((ts.LENGTH, ts.DATES))  # np.zeros takes a shape tuple
            pix2model = np.zeros((ts.LENGTH, ts.DT))
            colPix = np.zeros((ts.LENGTH, ts.IGRAMS))

            # concatenate same column from each interferogram into an array
            for j, ig in enumerate(ts):
                column = np.fromfile(ig.NAME, dtype=np.float16, count=ts.LENGTH)
                colPix[:,j] = column

            pix2igram = np.isfinite(colPix)
            coverage = np.fromfile(coverage)  # load DQ map (NOTE: `coverage` is read before assignment in the original)
            iterPixels = np.where(coverage >= ts.igthresh)

            # perform pixel-by-pixel inversion
            for k, pixel in enumerate(iterPixels):
                indIG = np.flatnonzero(pix2igram[pixel,:])  # was MATLAB-style find(...) == 1
                indDate = np.unique(ts.timeIndex[indIG,:])
                dtVector = np.diff(ts.Serial(indDate)) / 365.242  # convert days to years

                # Set up B matrix
                B = np.zeros((len(indIG), len(dtVector)))

        print('Done')
Example n. 25
  def develop(self):
    """
    Develop the 3D shape into 2D
    """
    return  # NOTE: this early return leaves the sketch below unreachable
    ## Every flat generatrice is 2 2D points
    flatgen = np.zeros((n_gen, 2, 2))
    flatgen[0,0] = np.zeros(2)
    flatgen[0,1] = np.array([0, length(self.gen[0])])
    ## 1st generatrice is already in same plane as 0st
    u1 = self.support[1] - self.support[0]
    c = cos(self.gen[1], u1)
    s = cos(self.gen[1], u1)  # NOTE: duplicates c; the sine was presumably intended
    d1 = np.array(length(u1)*s + length(u1)*c)
    flatgen[1,0] = flatgen[0,0] + d1
    
    for i in np.arange(1, n_gen):
        ## v is rotation axis
        v = self.gen[i]
        u1 = self.support[i] - self.support[i-1]
        u2 = self.support[i] + self.gen[i] - self.support[i-1] - self.gen[i-1]
        ## Pick the best triangle for reference plane
        c1 = np.cross(u1, v)
        c2 = np.cross(v, u2)
        if length(c1) > length(c2):
            u = u1
        else:
            u = u2
        ## Compute angle between gen and v
        c = cos(self.gen[i+1], v)
        alpha = np.arccos(c)  # np.acos does not exist
        ## alpha is preserved in rotation, such as length of gen
        
    return self
Example n. 26
def _process_one_file(thread_index, ranges, name, meta_data, num_shards):
    """Process and save a subset of meta data as TFRecord files in one thread.
    Args:
        thread_index: Integer thread identifier with in [0, len(ranges)]
        ranges: A list of pairs of integers specifying the ranges of the dataset to process in parallel
        name: Unique identifier specifying the dataset
        meta_data: List of raw data
        num_shards: Integer, number of shards for output files
    """
    # Each thread processes N shards where N = num_shards / num_threads. For instance, if num_threads = 2
    # and num_shards = 128, then the first thread would produce shards [0, 64)
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    start_point = ranges[thread_index][0]
    end_point = ranges[thread_index][1]
    shard_ranges = np.linspace(start_point, end_point,
                               num_shards_per_batch + 1).astype(int)

    num_data_in_thread = end_point - start_point

    counter = 0
    for s in range(num_shards_per_batch):
        # generate a shards version of the file name, eg: 'train-00001-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join("", output_filename)

        # write to file
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        data_in_shard = np.arange(shard_ranges[s],
                                  shard_ranges[s + 1],
                                  dtype=int)
        for i in data_in_shard:
            data = meta_data[i]

            # Serialize; here you can choose a different serialization method
            sequence_example = _text_to_sequence_example(data)
            #sequence_example = sequence_to_example()
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1

            # print info
            if not counter % 100:
                print(
                    "%s [thread %d]: Processed %d of %d items in thread batch "
                    % (datetime.now(), thread_index, shard_counter,
                       num_data_in_thread))

            sys.stdout.flush()

        writer.close()
    print("{} [shards {}]: Wrote {} data to {} shards".format(
        datetime.now(), thread_index, counter, num_data_in_thread))
    sys.stdout.flush()
Example n. 27
def plt_func_lines(funcs,
                   xarry=None,
                   cols=None,
                   figure=None,
                   fig_size=None,
                   save_name=None):
    """
  Draw plt by function
  Args:
    fig_size: width, height
  """
    fg = plt.figure(figsize=fig_size) if figure is None else figure
    if xarry is None:
        xarry = np.linspace(-5.0, 5.0, 100)  # np.range does not exist; linspace matches the 100-point intent
    if not isinstance(funcs, list):
        funcs = [funcs]

    ax = fg.gca()  # Figure has no .plot(); draw on its axes
    for i in range(len(funcs)):
        ax.plot(xarry,
                funcs[i](xarry),
                color=__DEF_COLORS[i % len(__DEF_COLORS)],
                linewidth=1.0,
                linestyle=__DEF_LINE_STY[i % len(__DEF_LINE_STY)])
    if save_name is not None:
        fg.savefig(save_name)
    return fg
Example n. 28
def compute_MC_Residual_Histogram(capital, m,state_grid, state_poly_grid, epsilon_grid, asset_grid, alpha, N, delta, beta, mu,tau, sigma, n_epsilon, n_assets ):
    """
    Computes residual of market-clearing condition to compute an initial guess for exponential distribution family.
    """

    # solve for market clearing capital level
    #first compute prices of representative firm in winberry 2018.
    r= alpha * (capital **(alpha -1))*N**(1-alpha)-delta
    w = capital**(1-alpha)*N**(-alpha)
    # mu unemployment benefits
    init_val_grid = np.log(beta*(1+r)*w*(mu*(1-epsilon_grid)+(1-tau)*epsilon_grid+r*asset_grid**(-sigma)))  # closing paren restored; grouping as in the original
    coef = np.zeros((n_epsilon, n_assets))  # np.zeros takes a shape tuple
    for i in range(n_epsilon):
        pass  # loop body missing in the original

    #guess prices

    # compute grids for firms (only if doing multi-firm version)
    #labor_demand_grid =(np.exp(m_state_grid[:,0])*(m_state_grid[:,1])**(θ)/wage_guess)**(1/(1-ν))
    #profit_grid = (1-τ)*(np.exp(m_state_grid[:,1])*(m_state_grid[:,1])**(θ))*(labor_demand_grid**(ν)) - wage_guess * labor_demand_grid

    # initalize value function  init_est of value=pi+(1-d)*K
    #init_val_grid = profit_grid + (1-δ)*m_state_grid[:,1]
    #estimated 
    #coeff = np.sum(state_poly)
  
Example n. 29
def spearmanr(x, y):
    """
    Spearman's rank correlation coefficient, nonparametric measure of statistical dependence between
        two variables. It assesses how well the relationship between two variables can be
        described using a monotonic function. If there are no repeated data values,
        a perfect Spearman correlation of +1 or −1 occurs when each of the variables
        is a perfect monotone function of the other.
    :param x:
    :param y:
    :return:
    """
    n = len(x)
    xi = np.argsort(x)
    yi = np.argsort(y)
    yii = np.argsort(yi)  # inverse

    # xs = np.array([x[i] for i in xi])
    # ys = np.array([y[i] for i in xi])
    # ysy = np.array([y[i] for i in yi])

    xr = np.arange(n) + 1
    yr = np.array([yii[i] for i in xi]) + 1
    dr = (xr - yr) * (xr - yr)
    ds = np.sum(dr)

    return 1 - (6.0 * ds) / (n * (n * n - 1.0))
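A sanity check against scipy (this implementation assumes no tied values,
which hold almost surely for continuous draws):

import numpy as np
from scipy import stats
rng = np.random.default_rng(0)
a = rng.normal(size=50)
b = a + rng.normal(scale=0.5, size=50)
print(spearmanr(a, b))
print(stats.spearmanr(a, b).correlation)  # should match to floating point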
def batch_iter(data, batch_size, epochs, shuffle):
    """
    specific task data and label are chosen randomly, \
    each batch of data come from the same task, but different sentiment polarity
    """
    start_indices = [0] * len(params["task"])
    epoch_count = [0] * len(params["task"])
    while True:
        if _end_batch_iter():
            break
        else:
            while True:
                task = random.randint(0, len(params["task"]) - 1)
                if epoch_count[task] != epochs:
                    break
            data_size = len(data[task])
            start_index = start_indices[task]
            end_index = min(start_index + batch_size, data_size)
            start_indices[task] = end_index if end_index != data_size else 0
            epoch_count[task] += (0 if end_index != data_size else 1)

            if end_index == data_size and epoch_count[
                    task] < epochs and shuffle:
                shuffled_indices = np.random.permutation(
                    np.arange(data_size))  # TODO
                data[task] = data[task][shuffled_indices]
                epoch_count[task] += 1
            elif end_index == data_size:
                epoch_count[task] += 1

            yield task, data[task][start_index:end_index]
Example n. 31
 def adapt(self, data_sample):
     self.vocab = get_vocabulary(data_sample, self.max_vocabulary_size)
     words = tf.constant(self.vocab)
     word_ids = tf.range(len(self.vocab), dtype=tf.int64)
     vocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids)
     self.table = tf.lookup.StaticVocabularyTable(vocab_init,
                                                  self.n_oov_buckets)
Example n. 32
    def estimate_median_dist(self):
        """Estimate the median distance between cells.
        This is used to compute 
        """
        # Randomly selected indices (subsample only when there are fewer samples
        # than points; the original compared against data.shape[1], presumably a slip)
        if self.nsamples < self.data.shape[0]:
            index = np.random.choice(self.data.shape[0], self.nsamples, replace = False)
            x = self.data[index,:]
        else:
            index = np.arange(self.data.shape[0])
            x = self.data
        
        # which ell_p norm is used

        if self.use_KD_tree:
            # We need to take the first two points (k=2), since distance of the point
            # to itself is zero.
            (dist, i) = self.kd_tree.query(x, k=2, p = self.distance_metric)
            dist = dist[:,1] 
        else:
            dist = np.zeros(self.nsamples)
            d = np.zeros(self.data.shape[0])
            for j in range(self.nsamples):
                err = (np.abs(x[j] - self.data))**self.distance_metric
                np.sum(err,axis=1,out=d) 
                # give infinite distance to the point with itself
                d[index[j]] = float('inf')
                dist[j] = d.min()
        
        self.median_dist = np.median(dist)
    
        if self.distance_threshold is None:
            self.distance_threshold =  self.alpha*self.median_dist   

        return self.median_dist
 def subtract_points(self, indices_to_subtract):
     ''' Subtract points by indices and return a new sub cloud. '''
     num_points = np.asarray(self.points).shape[0]
     all_indices = np.arange(num_points)
     rest_indices = np.setdiff1d(all_indices, indices_to_subtract)
     rest_pcd = self.select_points(rest_indices)
     return rest_pcd
Example n. 34
 def remove(self, value):
     for x in range(len(self)):
         if self[x] == value:
             self.pop(x)
             break
     else:
         raise AttributeError
Example n. 35
def pendulum():
    m = 3.0 # mass
    g = 9.8 # acceleration due to gravity
    r = 2.0 # radius (length)
    I = 12.0 # moment of Inertia
    dt = .0025 # step size
    l = 2.0
    c = 10 # number of cycles
    t = np.arange(0, c, dt) # range of iterations for each step size
    n = len(t) # number of iterations
    h = dt # RK4 step size; the loop below uses h (and an assumed acceleration function a())
    y = np.zeros(n) # y coordinates
    v = np.zeros(n) # velocity values
    theta0 = 90 # initial angle
    # delta_theta(T) # velocity across the time interval. Still working on this
    for i in range(0, n-1):
        """
        Calculate Runge-Kutta formulations for each time point (step)
        """
        k1y = h*v[i]
        k1v = h*a(y[i])

        k2y = h*(v[i] + .5*k1v)
        k2v = h*a(y[i] + .5*k1y)

        k3y = h*(v[i] + .5*k2v)
        k3v = h*a(y[i] + .5*k2y)

        k4y = h*(v[i] + k3v)
        k4v = h*a(y[i] + k4y)

        y[i+1] = y[i] + (k1y + 2 * k2y + 2 * k3y + k4y) / 6.0
        v[i+1] = v[i] + (k1v + 2 * k2v + 2 * k3v + k4v) / 6.0
Example n. 36
 def diffusion_map_embedding(self, n_dims=2, t=1):
     # if self.W is None:
     #    self.eigsys()
     vals, vecs = sp.linalg.eigs(self.P.T, k=n_dims + 1, which="LR")
     ind = np.arange(1, n_dims + 1)
     Y = np.real(vals[ind] ** t) * np.real(vecs[:, ind])
     return Y
Example n. 37
def generate_data(configs):
    series = []
    for config in configs:
        data = np.zeros(config['data_size'])
        for i in range(0, config['data_size']):
            data[i] = config['function'](i)
        series.append(data)

    data_sum = np.sum(series, axis=0)
    return data_sum, np.arange(len(data_sum))
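A usage sketch: two components summed into one series (the config keys follow
the function above; the generator functions are illustrative):

import numpy as np
configs = [
    {'data_size': 100, 'function': lambda i: np.sin(i / 10.0)},
    {'data_size': 100, 'function': lambda i: 0.05 * i},  # linear trend
]
values, index = generate_data(configs)
print(values.shape, index[:5])  # (100,) [0 1 2 3 4]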
Example n. 38
	def reorder(self):
		perm = list(range(len(self.qns)))
		# pair each qn with its index (the original Cartesian product was presumably a slip)
		qns_perm = [(qn, per) for qn, per in zip(self.qns, perm)]
		qns_perm = sorted(qns_perm, key=lambda x: x[0])
		self.qns = []
		perm = []
		for qn, per in qns_perm:
			self.qns.append(qn)
			perm.append(per)
		return perm
def PSTH_phaseb(cycle_TS, cell, ret='xTrial'):
    ntrials, ie, nbins = np.shape(cycle_TS)
    inh = []
    exh = []
    cA = np.arange((cell >= cycle_TS[0].min()).nonzero()[0].min()-1,
                   (cell <= cycle_TS[-1].max()).nonzero()[0].max()+1)
    for j in range(ntrials):
        cran = [cycle_TS[j].min(), cycle_TS[j].max()]
        cT = np.arange((cell[cA] >= cran[0]).nonzero()[0].min()-1,
                       (cell[cA] <= cran[1]).nonzero()[0].max()+1)
        for i in range(nbins-1):
            inh.append(sum((cell[cT] > cycle_TS[j][0][i]) &\
                           (cell[cT] < cycle_TS[j][0][i+1])))
            exh.append(sum((cell[cT] > cycle_TS[j][1][i]) &\
                           (cell[cT] < cycle_TS[j][1][i+1])))
    inh, exh = [np.array(arr) for arr in [inh, exh]]
    if ret == 'xSes':
        psth = np.hstack((np.sum(inh, axis=0), np.sum(exh, axis=0)))
    elif ret == 'xTrial':
        psth = np.hstack((inh, exh))
    return psth
 def cmptMass(self, mMax, mMin):
     """ 
     compute mass, considering mass lost due to oxidation.  
     Args:
         mMax: maximun mass.
         mMin: mass after burn-out.           
     return:
         m: mass, shape(1, self.Nt)
     """
     k = (mMax- mMin)/self.tm
     m = []
     for (i, t) in zip(np.range(self.Nt), self.flight_time):
         m[i] = (mMax- k*t) if (t<self.tm) else mMin
     return np.ndarray(m)
Example n. 41
def get_random_voxels(dataset, n_voxels):
  """ Returns mappable data for a random subset of voxels.

  May be useful as a baseline in predictive analyses--e.g., to compare performance 
  of a more principled feature selection method with simple random selection.

  Args:
    dataset: A Dataset instance
    n_voxels: An integer specifying the number of random voxels to select.

  Returns:
    A 2D numpy array with (randomly-selected) voxels in rows and mappables in columns.
  """
  voxels = np.arange(dataset.volume.num_vox_in_mask)
  np.random.shuffle(voxels)  # shuffles in place and returns None
  selected = voxels[0:n_voxels]
  return dataset.get_image_data(voxels=selected)
Example n. 42
    def get_data(self,channels = 'all', beg_time = 0, final_time='end'):
        if channels == 'all':
            channels = np.arange(self.channels)
        
        beg_sample = int(np.floor(beg_time* self.fs))
        if self.total_samples < beg_sample:
            raise NameError('beg_time out of data')
            
        beg_file = int(beg_sample/self.samples4file)
        rel_beg_sample = beg_sample - beg_file * self.samples4file
        
        if final_time == 'end':
            final_sample = self.total_samples
        else:
            final_sample = int(np.ceil(final_time* self.fs))
        
        if final_sample <= beg_sample:
            raise NameError('final < beginning')
        
        final_file = int(final_sample/self.samples4file)
        rel_final_sample = final_sample - final_file * self.samples4file
        data = np.ndarray([len(channels),final_sample-beg_sample])
        if beg_file != final_file:
            # start:
            new_file = self.reg_filename + "-" + str(beg_file+1) #plus 1, because data begun in 1
            new_data = np.fromfile(new_file, np.int16)
            data[:, :self.samples4file - rel_beg_sample] = new_data.reshape([self.channels, new_data.size // self.channels], order='F')[channels, rel_beg_sample:]  # // keeps the shape an int
            written_samples = self.samples4file - rel_beg_sample

            #final
            new_file = self.reg_filename + "-" + str(final_file+1)#plus 1, because data begun in 1
            new_data = np.fromfile(new_file, np.int16)
            data[:, -rel_final_sample:] = new_data.reshape([self.channels, new_data.size // self.channels], order='F')[channels, :rel_final_sample]
        else:
            new_file = self.reg_filename + "-" + str(beg_file+1) #plus 1, because data begun in 1
            new_data = np.fromfile(new_file, np.int16)
            data = new_data.reshape([self.channels, new_data.size // self.channels], order='F')[channels, rel_beg_sample:rel_final_sample]
            written_samples = self.samples4file - rel_beg_sample
       
 
        for i in range(beg_file+1, final_file+1):
            new_file = self.reg_filename + "-" + str(i+1)#plus 1, because data begun in 1
            new_data = np.fromfile(new_file, np.int16)
            # NOTE: written_samples is never advanced inside this loop in the original
            data[:, written_samples:written_samples + self.samples4file] = self.adc_scale * new_data.reshape([self.channels, new_data.size // self.channels], order='F')[channels, :]
            
        return data
def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # x - columns
        # y - rows
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    fig, ax = plt.subplots()  # plot_matches takes an Axes plus keypoints, not descriptors (assumes matplotlib.pyplot as plt)
    plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian
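A usage sketch with a synthetic horizontal shift (assumes the imports the
function relies on: skimage's rgb2gray/ORB/match_descriptors/plot_matches,
scipy.spatial.distance.hamming, numpy, matplotlib):

import numpy as np
from skimage import data
img0 = np.dstack([data.camera()] * 3)  # fake RGB from a gray test image
img1 = np.roll(img0, 5, axis=1)        # shift 5 px along columns
dx, dy = getDisplacement(img0, img1)
print(dx, dy)                          # median keypoint displacement, ~5 px on one axis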
Example n. 44
File: qgl.py Project: cluquin/dcode
 def spin_res_trotter_evolve(self, L, R, Omega, delta0, J, dt, nmax, meas, t0 = 0):
     M = nmax  # the original also had no-op self-assignments (dt = dt, meas = meas)
     dim = len(self.curr_state)
     meas.measure(self.curr_state, t0)
     layers, operators = spin_res_ops(L, R, Omega, delta0, J)
     layers = np.asarray([np.array(layer.ll) for layer in layers.ll])-1
     propagators = build_spin_res_propagators(operators, dt)
     ns = list(range(R+1)) + list(np.abs(R-1-np.arange(R)))  # range() objects do not concatenate
     
     for m in range(1, M):
         for n in ns:
             for i in range(len(layers[n])):
                 self.curr_state = op_on_state(propagators[n][i], list(layers[n][i]), 
                                               self.curr_state)
         meas.measure(self.curr_state, t0 + m*dt)
     meas.write_out()
    def train(self,X, Y):
        '''
            Trains a weak learner from all numerical attribute for all possible split points for
            possible feature selection
            
            Input:
            ---------
            X: a [m x d]  features matrix
            Y: a [m x 1] labels matrix
            
            Returns:
            ----------
            v: splitting threshold
            score: splitting score
            Xlidx: Index of examples belonging to left child node
            Xridx: Index of examples belonging to right child node
            
        '''
        nexamples,nfeatures=X.shape

        
        if(not self.nrandfeat):
            self.nrandfeat = int(np.round(np.sqrt(nfeatures)))  # np.round() yields a float; counts need an int

        #-----------------------TODO-----------------------#
        #--------Write Your Code Here ---------------------#
        
        score = 0
        
        req_Split_point = np.array([])
        
        self.fidx = np.random.randint(0, nfeatures, self.nrandfeat)  # np.random.random takes no such arguments

        for fid in self.fidx:  # `fidx` alone was undefined
            self.rf = np.ptp(X[:, fid])  # value range of the feature; np.range does not exist
            splitvalue, minscore, bXl, bXr = findBestRandomSplit(fid, Y)
            if(minscore > score):
                req_Split_point = splitvalue
                score = minscore
                
        
        #---------End of Your Code-------------------------#
        return score, bXl,bXr
Example n. 46
def recunpack(rec):
    fields = rec.dtype.names
    nrec = len(rec)
    temprec = copy(rec)
    for field in fields:
        if type(temprec[0][field]) is not np.ndarray:  # was `newrec`, which is not defined yet
            rec = mlab.rec_drop_fields(rec, field)

    newrec = rec[0]

    for field in fields:

        if type(newrec[field]) is np.ndarray:
            l = len(newrec[field])
            newrec[field] = resize(newrec[field], (nrec, l))
            #fill in other elements
            for i in np.arange(nrec):
                newrec[field][i] = rec[field][i]
        else:
            pass  # the original snippet is truncated here
Example n. 47
    def _plot_plotly(self, layout):
        from plotly import graph_objs

        if self.sortby is not None:
            inds = numpy.argsort(list(self.data[self.sortby].values()))[::-1]
        else:
            inds = numpy.arange(len(self.data[list(self.data.keys())[0]]))

        data = []
        for label, sample in self.data.items():
            color = next(_COLOR_CYCLE)  # Python 3 iterator protocol
            data.append({
                'name': label,
                'x': numpy.array(list(sample.keys()))[inds],
                'y': numpy.array(list(sample.values()))[inds],
                'type': 'bar',
                'opacity': 0.5,
                'marker': {'color': color}
            })
        fig = graph_objs.Figure(data=graph_objs.Data(data), layout=layout)
        return fig
Example n. 48
def plot_tsne_selection_grid(z_pos, x_pos, z_neg, vmin, vmax, grid_size, fig_path,
							 labels=None, fig_size=(9,9), suffix='png', text_annot=None):
	ncol = x_pos.shape[1]
	if labels is None:
		labels = [str(a) for a in np.arange(ncol)]
	
	fig = plt.figure(figsize=fig_size)
	fig.clf()
	g_i, g_j = grid_size
	grid = ImageGrid(fig, 111, 
					 nrows_ncols=(g_i, g_j),
					 ngrids = ncol,
					 aspect=True,
					 direction="row",
					 axes_pad=(0.15, 0.5),
					 add_all=True,
					 label_mode="1",
					 share_all=True,
					 cbar_location="top",
					 cbar_mode="each",
					 cbar_size="8%",
					 cbar_pad="5%",
					 )
	for seq_index, ax in enumerate(grid):
		ax.text(0, .92, labels[seq_index],
				horizontalalignment='center',
				transform=ax.transAxes, size=20, weight='bold')
		a = x_pos[:, seq_index]
		ax.scatter(z_neg[:,0], z_neg[:,1], s=.5, marker='o', c='lightgray',
					alpha=0.5, edgecolors='face')                
		im = ax.scatter(z_pos[:,0], z_pos[:,1], s=.5, marker='o', c=a, cmap=cm.jet,
						edgecolors='face', vmin=vmin[seq_index], vmax=vmax[seq_index])
		ax.cax.colorbar(im)            
		clean_axis(ax)
		ax.grid(False)
  
	plt.savefig('.'.join([fig_path, suffix]), format=suffix)
	plt.clf()
	plt.close()
Example n. 49
#
#
# A = np.vstack([x, np.ones(len(x))]).T
# m, c = np.linalg.lstsq(A, y)[0]
#
# print "Increasing" if m > 0 else "Decreasing"
# print np.poly1d((m, c))
#
# plt.plot(x, y, 'o', label='Original data', markersize=10)
# plt.plot(x, m*x + c, 'r', label='Fitted line')
# plt.legend()
# plt.show()


y = np.linspace(0, 99, 100) + 5 + np.random.random(100) * 100
x = np.arange(100)


def pearsonr(x, y):
    """
    Pearson product-moment correlation coefficient, measure of the linear correlation between
        two variables X and Y, giving a value between +1 and −1 inclusive, where 1 is total
        positive correlation, 0 is no correlation, and −1 is total negative correlation.
    :param x:
    :param y:
    :return:
    """
    xm, ym = np.mean(x), np.mean(y)

    xn, yn = x - xm, y - ym
    return np.sum(xn * yn) / math.sqrt(np.sum(xn * xn) * np.sum(yn * yn))
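A quick sanity check on the x and y generated above (pearsonr assumes
`import math` at module level):

print(pearsonr(x, y))  # clearly positive for this noisy increasing series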
import numpy as np
from scipy.signal import hann, convolve, gaussian  # `gaussian` was missing from the import
from CGLS import CGLS

step = np.repeat([0., 1., 0.], 1000)
sine = np.sin(np.arange(0, 785)/1000)
clean_signal = convolve(step, sine, mode='same')

noisy_signal = convolve(clean_signal, gaussian(25, 2*np.max(clean_signal)), mode='same')
noisy_signal = noisy_signal/np.max(noisy_signal)


s = len(noisy_signal)//5

samples = [noisy_signal[int(k*s)] for k in range(1, 5)]


Example n. 51
#s_new = 1-  B * (np.exp(-k * d_t) * np.power(((1 - s_old)/B),-k))

s_new = 1- ( (1-s_old)*np.exp(-k * d_t) )


plt.plot(s_old, s_new)

"""
#plt.show()

sz= 1-B
time= [0]
size = [sz]


for i in range(1, 50):
    sz = 1- ( (1-sz)*np.exp(-k * d_t) )
    time.append(i)
    size.append(sz)
    

print(time)
print(size)


plt.plot(time, size)
plt.title("last graph")
plt.show()


Example n. 52
    # data_simple,
    data_complex_trend,
    data_variation,
    # data_reversed_trend,
    data_trend_jump,
]
for config in configs:
    data.append(generate_data(config))

for f in filters:
    index = filters.index(f)
    filtered.append([])
    for x, y in data:
        print("Filtering data using {}".format(f.__name__))  # func_name is Python 2 only
        x_ = f(x)
        y_ = np.arange(len(x_)) if type(x_) is not tuple else ([range(len(x_[0]))] * len(x_))
        filtered[index].append((x_, y_))

dim = Subplots.get_dimensions(len(data))
figure, subplots = plt.subplots(*dim, sharex=True, sharey=True)
plots = Subplots(subplots)
plots.grid(True)

colors.reset()
for i in range(len(data)):
    c = Colors.create(colors.next(), len(filters))
    plots[i].plot(data[i][1], data[i][0], '.-', c=c.next(), lw=0.1)
    legends = [mpatches.Patch(color=c.prev(), label=configs[i].name)]

    for f in filters:
        j = filters.index(f)
import numpy as np


def main():
    from load import load
    import matplotlib.pyplot as plt

    im = load('data/doodle.txt')
    plt.imshow(im)
    plt.show()

    from tom import sol1
    test = sol1(im)
    return test


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Google Hash Code in Paris!')
    parser.add_argument('--hw', action='store_true')
    args = parser.parse_args()

    if args.hw:
        print('Hello world!')
        np.arange(2)
    t = main()
Example n. 54
 def createlevel(self,resolution,w=50,h=10,b=5):
   bricks = []
   for x in range(int(resolution[0]*.2), int(resolution[0]*.8), w+b):  # range() needs ints
     for y in range(int(resolution[1]*.2), int(resolution[1]*.4), h+b):
       bricks.append(pygame.Rect((x,y,w,h)))
   return bricks
Example n. 55
 def indices(self):
     return (self.terms,
             np.arange(self.nsrc),
             np.arange(self.nrec),
             )
Example n. 56
""""
`map()` is an in-built function in Python 
""""
def sqrt_it(iter):
  return map(lambda x:x**2, iter)
try:
  from numpy import arange as range
except ImportError:
  pass

seq = range(10)
print(list(sqrt_it(seq)))  # map objects need list() to display their contents
for i in sqrt_it(seq):
  print(i)
Example n. 57
def shuffle_data(data):
    positions = np.arange(0, data.X.shape[0])
    np.random.shuffle(positions)
    data.X = data.X[positions]
    data.y = data.y[positions]
    return data
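A minimal usage sketch with a stand-in data holder:

from types import SimpleNamespace
import numpy as np
d = SimpleNamespace(X=np.arange(10).reshape(5, 2), y=np.arange(5))
d = shuffle_data(d)
print(d.X, d.y)  # rows and labels permuted together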
#compute centers of parabola (which is the actual peak frequency)
k_int = peaks + 0.5 * (a - c) / (a - 2 * b + c)
#compute the magnitude at the interpolated frequencies
mX_int = b - 0.25 * (k_int - peaks) * (a - c)
#compute interpolated phase values
pX_int = np.interp(peaks, np.arange(0, pX.size), pX)

#SYNTHESIS----------------------------------------------
#function to generate sinc function
def sinc(x, N):
    y = np.sin(N * x / 2) / np.sin(x / 2)
    y[np.isnan(y)] = N
    return y

#x array to generate the main lobe
x_lobe = np.arange(-4, 3, 1)
f = x_lobe * np.pi * 2 / FFT_size
df_discrete = 2 * np.pi / FFT_size
bh_mainlobe = np.zeros(x_lobe.size)
consts = [0.35875, 0.48829, 0.14128, 0.01168]
for m in range(0, 4):
    bh_mainlobe += consts[m] / 2 * (sinc(f - df_discrete * m, FFT_size) + sinc(f + df_discrete *m, FFT_size))
bh_mainlobe = bh_mainlobe / FFT_size / consts[0]

#generate one main lobe at each detected frequency peak


#generate frequency axis data
freq = np.arange(0, fs/2 + df, df)
#plot the magnitude spectrum with the detected peaks marked
# plt.plot(freq, mX)
Example n. 59
	def solve(self, non_linear=True):
		""" Solve thermal conduction implicitely using finite differences """
		t = time.perf_counter()  # time.clock() was removed in Python 3.8
		from scipy.sparse import coo_matrix, csr_matrix
		from scipy.sparse.linalg import spsolve

		def writeMatrix(Ir, Jr, Vr):
			""" Writes nodes to a sparse tridiagonal matrix """
			global I, J, V, nPos
			I[nPos] = Ir
			J[nPos] = Jr
			V[nPos] = Vr
			nPos += 1

		nx, ny = len(self.points_x), len(self.points_y)
		Nnode = nx*ny

		global I, J, V, nPos
		I = np.zeros(Nnode*5-3, dtype=int)
		J = np.zeros(Nnode*5-3, dtype=int)
		V = np.zeros(Nnode*5-3)
		nPos = 0
		matC = np.zeros(Nnode)

		adx = 1.0 / (2*self.dx**2)
		ady = 1.0 / (2*self.dy**2)

		residual = 10.0
		nIter = 0
		while residual > 1e-6:
			nIter += 1
			nPos = 0
			index = np.arange(Nnode, dtype=int)
			remove = []
			temperature_last = self.temperature

			## BCs ##
			# Top and Bottom BCs
			for pos in range(0, nx):
				jBC = pos
				if self.topFlux:
					writeMatrix(jBC, jBC, (self.conductivity[-1,pos]+self.conductivity[-2,pos])*-ady)
					writeMatrix(jBC, jBC+nx, (self.conductivity[-1,pos]+self.conductivity[-2,pos])*ady)
					matC[jBC] = (self.heatProduction[-1,pos]+self.heatProduction[-2,pos]) / 2.0 - self.topBC/self.dy
				else:
					writeMatrix(jBC, jBC, 1.0)
					matC[jBC] = -300. #Top BC
				remove.append(jBC)

				jBC += Nnode-nx
				if self.bottomFlux:
					writeMatrix(jBC, jBC, (self.conductivity[-1,pos]+self.conductivity[-2,pos])*-ady)
					writeMatrix(jBC, jBC-nx, (self.conductivity[-1,pos]+self.conductivity[-2,pos])*ady)
					matC[jBC] = (self.heatProduction[-1,pos]+self.heatProduction[-2,pos]) / 2.0 - self.bottomBC/self.dy
				else:
					writeMatrix(jBC, jBC, 1.0)
					matC[jBC] = self.bottomBC *-1
				remove.append(jBC)

			# Left and Right BCs
			for pos in range(1, ny-1):
				iBC = pos*nx
				if self.leftFlux:
					writeMatrix(iBC, iBC, (self.conductivity[pos,0]+self.conductivity[pos,1])*adx)
					writeMatrix(iBC, iBC+1, (self.conductivity[pos,0]+self.conductivity[pos,1])*-adx)
					matC[iBC] = (self.heatProduction[pos,0]+self.heatProduction[pos,1]) / 2.0 - self.leftBC/self.dx
				else:
					writeMatrix(iBC, iBC, 1.0)
					matC[iBC] = self.leftBC *-1
				remove.append(iBC)

				iBC += nx-1
				if self.rightFlux:
					writeMatrix(iBC, iBC, (self.conductivity[pos,-1]+self.conductivity[pos,-2])*adx)
					writeMatrix(iBC, iBC-1, (self.conductivity[pos,-1]+self.conductivity[pos,-2])*-adx)
					matC[iBC] = (self.heatProduction[pos,-1]+self.heatProduction[pos,-2]) / 2.0 - self.rightBC/self.dx
				else:
					writeMatrix(iBC, iBC, 1.0)
					matC[iBC] = self.rightBC *-1
				remove.append(iBC)

			index = np.delete(index, remove)

			pos = 0
			for r in range(1, ny-1):
				for c in range(1, nx-1):
					Di = [self.conductivity[r,c-1], self.conductivity[r,c], self.conductivity[r,c+1]]
					Dj = [self.conductivity[r-1,c], self.conductivity[r,c], self.conductivity[r+1,c]]

					writeMatrix(index[pos], index[pos]-nx, (Dj[0]+Dj[1])*ady)
					writeMatrix(index[pos], index[pos]-1, (Di[0]+Di[1])*adx)
					writeMatrix(index[pos], index[pos], (Di[0]+2*Di[1]+Di[2])*-adx + (Dj[0]+2*Dj[1]+Dj[2])*-ady)
					writeMatrix(index[pos], index[pos]+1, (Di[2]+Di[1])*adx)
					writeMatrix(index[pos], index[pos]+nx, (Dj[2]+Dj[1])*ady)

					matC[index[pos]] = self.heatProduction[r,c]
					pos += 1

			A = coo_matrix((V, (I, J))).tocsr()
			b = coo_matrix((np.array(matC)*-1), shape=(Nnode,1)).T
			temperature = spsolve(A, b.tocsr())

			self.temperature = np.reshape(temperature, (ny, nx))
			if non_linear:
				self.conductivity = nonLinearConductivity(temperature)

			residual = np.absolute(np.array(temperature)-np.array(temperature_last)).max()

			if self.verbose:
				print("[Iteration %i] residual - %.04f, walltime - %0.2f" % (nIter, residual, time.perf_counter()-t))

		if non_linear:
			return self.temperature, self.conductivity
		else:
			return self.temperature