Example 1
    def create_mask(self,u0):
        st=self.st
        print('u0.shape',u0.shape)
        rows=u0.shape[0]
        cols=u0.shape[1]

        kk = numpy.arange(0, rows)
        jj = numpy.arange(0, cols)

        kk = CsTransform.pynufft.appendmat(kk,cols)
        jj = CsTransform.pynufft.appendmat(jj,rows).T
        st['mask']=numpy.ones((rows,cols),dtype=numpy.float32)

        #add circular mask
        sp_rat=(rows**2+cols**2)*1.0
        
#         for jj in xrange(0,cols):
#             for kk in xrange(0,rows):
#                 if ( (kk-rows/2.0)**2+(jj-cols/2.0)**2 )/sp_rat > 1.0/8.0:
#                     st['mask'][kk,jj] = 0.0
        
        if numpy.size(u0.shape) > 2:
            for pp in range(2,numpy.size(u0.shape)):
                st['mask'] = CsTransform.pynufft.appendmat(st['mask'],u0.shape[pp] )
 
        return st
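For reference, the commented-out circular-mask loop above can be written without the double loop; the sketch below is only illustrative (the helper name is not part of the original class) and reuses the same rows/cols and 1/8 area ratio.

import numpy

def circular_mask(rows, cols, ratio=1.0 / 8.0):
    # grids of row and column indices, same shape as the mask
    kk, jj = numpy.mgrid[0:rows, 0:cols]
    sp_rat = float(rows ** 2 + cols ** 2)
    # keep points within the chosen fraction of the normalised distance from the centre
    inside = ((kk - rows / 2.0) ** 2 + (jj - cols / 2.0) ** 2) / sp_rat <= ratio
    return inside.astype(numpy.float32)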
Example 2
    def simulate(self, prices, molecule, span):
        """
        Derive labels from the sign of t-value of the linear trend
        :param prices: Time series of {x_t}
        :type prices: array like
        :param molecule: the index of the observations we wish to label
        :type molecule: array like
        :param span: set of values of L, the look forward period
        :type span: array like
        """
        out = pd.DataFrame(index=molecule, columns=['tl', 'tval', 'bin'])
        horizons = np.arange(*span)
        for dt in molecule:
            df = pd.Series()
            iloc = prices.index.get_loc(dt)
            if iloc + max(horizons) > prices.shape[0]:
                continue
            for horizon in horizons:
                dt1 = prices.index[iloc + horizon - 1]
                df1 = prices.loc[dt:dt1]
                df.loc[dt1] = t_value_lin(df1.values)

            dt1 = df.replace([-np.inf, np.inf, np.nan], 0).abs().idxmax()
            out.loc[dt,
                    ['tl', 'tval', 'bin']] = df.index[-1], df[dt1], np.sign(
                        df[dt1])
        out['tl'] = pd.to_datetime(out['tl'])
        out['bin'] = pd.to_numeric(out['bin'], downcast='signed')
        return out.dropna(subset=['bin'])
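The labeling above relies on a helper t_value_lin that is not shown here. A minimal sketch of what such a helper typically computes, the t-statistic of the slope from an OLS fit of the price path against a time index, is given below; statsmodels is an assumed dependency and the exact implementation may differ.

import numpy as np
import statsmodels.api as sm

def t_value_lin(close):
    # t-value of the slope from regressing the price path on [1, t]
    x = np.ones((close.shape[0], 2))
    x[:, 1] = np.arange(close.shape[0])
    ols = sm.OLS(close, x).fit()
    return ols.tvalues[1]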
Example 3
def compFq(rms, qs):
    """Compute scaling function F as:

      F[scale] = pow(mean(RMS[scale]^q),1.0/q)

    This function computes F for all qs at each scale.
    The result is a 2d NxM array (N = rms.shape[0], M = len(qs))

    Parameters
    ----------
    rms:    the RMS 2d array (RMS for scales in rows) computed by compRMS or fastRMS
    qs:     an array of q coefficients

    Example
    -------
    # >>> X = cumsum(0.1*randn(8000))
    # >>> scales = (2**arange(4,10)).astype('i4')
    # >>> RMS = fastRMS(X,scales)
    # >>> qs = arange(-5,5.1,1.0)
    # >>> loglog(scales,compFq(RMS,qs),'.-')

    """
    out = np.zeros((rms.shape[0], len(qs)), 'f8')
    mRMS = np.ma.array(rms, mask=np.isnan(rms))
    for qi in range(len(qs)):
        p = qs[qi]
        out[:, qi] = (mRMS**p).mean(1)**(1.0 / p)
    out[:, qs == 0] = np.exp(0.5 * (np.log(mRMS**2.0)).mean(1))[:, None]
    return out
Example 4
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.
  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = int(data_set.num_examples / FLAGS.batch_size)
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in range(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = float(true_count) / float(num_examples)
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
Example 5
def download_all_shot_numbers(prepath, save_path, shot_list_files,
                              signals_full):
    max_len = 30000
    machine = shot_list_files.machine
    signals = []
    for sig in signals_full:
        if not sig.is_defined_on_machine(machine):
            print("Signal {} not defined on machine {}, omitting".format(
                sig, machine))
        else:
            signals.append(sig)
    save_prepath = prepath + save_path + "/"
    shot_numbers, _ = shot_list_files.get_shot_numbers_and_disruption_times()
    # can only use queue of max size 30000
    shot_numbers_chunks = [
        shot_numbers[i:i + max_len]
        for i in range(0, len(shot_numbers), max_len)
    ]
    start_time = time.time()
    for shot_numbers_chunk in shot_numbers_chunks:
        download_shot_numbers(shot_numbers_chunk, save_prepath, machine,
                              signals)

    print("Finished downloading {} shots in {} seconds".format(
        len(shot_numbers),
        time.time() - start_time))
Example 6
    def assignCrowdingDist(self,individuals):
        """Assign a crowding distance to each individual's fitness. The
        crowding distance can be retrieved via the :attr:`crowding_dist`
        attribute of each individual's fitness.
        """
        if len(individuals) == 0:
            return

        distances = [0.0] * len(individuals)
        crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]

        nobj = len(individuals[0].fitness.values)

        for i in range(nobj):
            crowd.sort(key=lambda element: element[0][i])
            distances[crowd[0][1]] = float("inf")
            distances[crowd[-1][1]] = float("inf")
            if crowd[-1][0][i] == crowd[0][0][i]:
                continue
            norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
            for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
                distances[cur[1]] += (next[0][i] - prev[0][i]) / norm

        for i, dist in enumerate(distances):
            individuals[i].fitness.crowding_dist = dist
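A self-contained sketch of the same crowding-distance formula on a plain objective matrix can be handy for sanity-checking the method above; the function name and array layout here are illustrative, not part of the class.

import numpy as np

def crowding_distance(F):
    # F has one row per individual and one column per objective
    n, nobj = F.shape
    dist = np.zeros(n)
    for i in range(nobj):
        order = np.argsort(F[:, i])
        dist[order[0]] = dist[order[-1]] = np.inf  # boundary points
        span = F[order[-1], i] - F[order[0], i]
        if span == 0:
            continue
        # neighbour gap of each interior point, normalised as in the method above
        dist[order[1:-1]] += (F[order[2:], i] - F[order[:-2], i]) / (nobj * span)
    return dist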
Example 7
def aperture(startpx, startpy, radius, nRows, nCols):
    r = radius
    length = 2 * r
    height = length
    allx = range(startpx - int(np.ceil(length / 2.0)),
                 startpx + int(np.floor(length / 2.0)) + 1)
    ally = range(startpy - int(np.ceil(height / 2.0)),
                 startpy + int(np.floor(height / 2.0)) + 1)
    mask = np.zeros((nRows, nCols))

    for x in allx:
        for y in ally:
            if (np.abs(x - startpx))**2 + (np.abs(y - startpy))**2 <= (
                    r)**2 and 0 <= y and y < nRows and 0 <= x and x < nCols:
                mask[y, x] = 1.
    return mask
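A quick illustrative call (values are arbitrary): a radius-2 aperture centred at pixel (5, 5) on a 10x10 grid.

mask = aperture(5, 5, 2, 10, 10)
print(int(mask.sum()))  # number of pixels inside the circular aperture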
Example 8
def cal(row,trainLabel):
    global w, b
    res = 0
    for i in range(len(row)):
        res += row[i] * w[i]
    res += b
    res *= trainLabel
    return res
Example 9
def group_list(l, group_size):
    """
    :param l: list or sequence
    :param group_size:
    :return: batch
    """
    for i in range(0, len(l), group_size):
        yield l[i:i + group_size]
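Illustrative use of the generator above; the final batch is simply shorter when the length is not a multiple of the group size.

batches = list(group_list(list(range(10)), 3))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]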
Example 10
def find(value, bin_edges):
    """
    helper function for variogram.
    """
    for k in range(len(bin_edges)):
        if value < bin_edges[k]:
            break
    return k-1
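For values that fall inside the bin edges, NumPy's searchsorted gives the same answer without the loop; this equivalent sketch is for comparison only and is not part of the original helper.

import numpy as np

def find_vectorized(value, bin_edges):
    # index of the bin whose right edge is the first one exceeding value
    return int(np.searchsorted(bin_edges, value, side='right')) - 1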
Example 11
def recurring_monthly(start_date, stop_date, base_string):
    starting = dt.strptime(start_date, '%m/%d/%y')
    dom = "{:02d}".format(int(start_date.split('/')[1]))
    ending = dt.strptime(stop_date, '%m/%d/%y')
    desc_list = [
        base_string + ' ' + dt.strptime('%2.2d-%2.2d' %
                                        (y, m), '%Y-%m').strftime('%b-%y')
        for y in range(starting.year, ending.year + 1)
        for m in range(starting.month if y == starting.year else 1,
                        ending.month + 1 if y == ending.year else 13)
    ]
    time_list = [
        dt.strptime('%2.2d-%2.2d' % (y, m), '%Y-%m').strftime('%y/%m/?')
        for y in range(starting.year, ending.year + 1)
        for m in range(starting.month if y == starting.year else 1,
                        ending.month + 1 if y == ending.year else 13)
    ]
    time_list = [x.replace('?', dom) for x in time_list]
    return time_list, desc_list
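An illustrative call, assuming dt is datetime.datetime and an English locale for the month abbreviations: three monthly entries from January to March 2021, each pinned to the 15th.

times, descs = recurring_monthly('01/15/21', '03/15/21', 'Rent')
# times -> ['21/01/15', '21/02/15', '21/03/15']
# descs -> ['Rent Jan-21', 'Rent Feb-21', 'Rent Mar-21']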
Example 12
 def import_raw_file(self, file):
     row_offset = 2
     data = []
     with open(file, 'r') as txt_in:
         # skip the header rows, then parse the whitespace-separated values
         for i in range(row_offset):
             next(txt_in)
         for line in txt_in:
             data.append(line.split())
     data = np.array(data, dtype=np.float64)
     return data
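For comparison only: NumPy's loadtxt covers the same "skip a header, parse whitespace-separated floats" pattern in a single call. The function name below is illustrative.

import numpy as np

def import_raw_file_loadtxt(path, row_offset=2):
    return np.loadtxt(path, skiprows=row_offset, dtype=np.float64)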
Example 13
def parent_path(depth=1):
    """
    Return path to directory which is depth
    levels above
    """
    path = os.path.abspath(__file__)
    n = 0
    for j in range(1, len(path) + 1):
        if path[-j] == "/":
            n += 1
        if n == depth:
            return path[:len(path)-j]
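An equivalent sketch using the standard library's pathlib (illustrative only, not part of the original module):

from pathlib import Path

def parent_path_pathlib(depth=1):
    # parents[0] is the directory containing this file, parents[1] its parent, and so on
    return str(Path(__file__).resolve().parents[depth - 1])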
Example 14
 def next_batch(self, batch_size, fake_data=False):
     """Return the next `batch_size` examples from this data set."""
     if fake_data:
         fake_image = [1.0 for _ in range(784)]
         fake_label = 0
         return [fake_image for _ in range(batch_size)
                 ], [fake_label for _ in range(batch_size)]
     start = self._index_in_epoch
     self._index_in_epoch += batch_size
     if self._index_in_epoch > self._num_examples:
         # Finished epoch
         self._epochs_completed += 1
         # Shuffle the data
         perm = np.arange(self._num_examples)
         np.random.shuffle(perm)
         self._images = self._images[perm]
         self._labels = self._labels[perm]
         # Start next epoch
         start = 0
         self._index_in_epoch = batch_size
         assert batch_size <= self._num_examples
     end = self._index_in_epoch
     return self._images[start:end], self._labels[start:end]
Example 15
def perceptionClassify(trainGroup, trainLabels):
    global w, b
    isFind = False  # the flag of find the best w and b
    numSamples = trainGroup.shape[0]
    mLength = trainGroup.shape[1]
    w = [0]* mLength
    b = 0
    while(not isFind):
        for i in range(numSamples):
            if cal(trainGroup[i], trainLabels[i]) <= 0:
                print("w:", w, "b:", b)
                update(trainGroup[i], trainLabels[i])
                break    # end for loop
            elif i == numSamples - 1:
                print("w:", w, "b:", b)
                isFind = True   # end while loop
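A toy run of the perceptron above on a linearly separable set (the three points are the classic textbook example); cal and update are the helpers defined elsewhere in this collection, and the globals w and b are set inside perceptionClassify.

import numpy as np

trainGroup = np.array([[3.0, 3.0], [4.0, 3.0], [1.0, 1.0]])
trainLabels = np.array([1, 1, -1])
perceptionClassify(trainGroup, trainLabels)
print(w, b)  # a separating hyperplane for the three points once training converges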
Example 16
    def selTournamentDCD(self,individuals, k):
        """Tournament selection based on dominance (D) between two individuals, if
        the two individuals do not interdominate the selection is made
        based on crowding distance (CD). The *individuals* sequence length has to
        be a multiple of 4. Starting from the beginning of the selected
        individuals, two consecutive individuals will be different (assuming all
        individuals in the input list are unique). Each individual from the input
        list won't be selected more than twice.
        This selection requires the individuals to have a :attr:`crowding_dist`
        attribute, which can be set by the :func:`assignCrowdingDist` function.
        :param individuals: A list of individuals to select from.
        :param k: The number of individuals to select.
        :returns: A list of selected individuals.
        """

        if len(individuals) % 4 != 0:
            raise ValueError("selTournamentDCD: individuals length must be a multiple of 4")

        if k % 4 != 0:
            raise ValueError("selTournamentDCD: number of individuals to select must be a multiple of 4")

        def tourn(ind1, ind2):
            if ind1.fitness.dominates(ind2.fitness):
                return ind1
            elif ind2.fitness.dominates(ind1.fitness):
                return ind2

            if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:
                return ind2
            elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:
                return ind1

            if random.random() <= 0.5:
                return ind1
            return ind2

        individuals_1 = random.sample(individuals, len(individuals))
        individuals_2 = random.sample(individuals, len(individuals))

        chosen = []
        for i in range(0, k, 4):
            chosen.append(tourn(individuals_1[i],   individuals_1[i+1]))
            chosen.append(tourn(individuals_1[i+2], individuals_1[i+3]))
            chosen.append(tourn(individuals_2[i],   individuals_2[i+1]))
            chosen.append(tourn(individuals_2[i+2], individuals_2[i+3]))

        return chosen
Example 17
    def predict(self, X):
        """ X is N x D where each row is an example we wish to predict label for """
        num_test = X.shape[0]
        # let's make sure that the output type matches the input type
        Ypred = np.zeros(num_test, dtype=self.ytr.dtype)

        # loop over all test rows
        for i in range(num_test):
            # find the nearest training image to the i'th test image
            # using the L1 distance (sum of absolute value difference)
            distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)
            min_index = np.argmin(
                distances)  # get the index with smallest distance
            # predict the label of the nearest training example
            Ypred[i] = self.ytr[min_index]

        return Ypred
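A fully vectorized variant of the same L1 nearest-neighbour lookup, kept as a standalone sketch (Xtr and ytr mirror the class attributes used above):

import numpy as np

def predict_l1_vectorized(Xtr, ytr, X):
    # pairwise L1 distances, shape (num_test, num_train)
    dists = np.abs(X[:, None, :] - Xtr[None, :, :]).sum(axis=2)
    return ytr[np.argmin(dists, axis=1)]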
Example 18
def compare_channel_replicates(data,
                               group=True,
                               title='',
                               col_groups=None,
                               cross=False):
    """ 
    Plot (ncols x ncols) grid of scatterplots comparing the measures in each column
    data must have 'accession_number' column to aggregate by if group is True
    col_groups is a list of name-value pairs to split the plot into several
        saved figures, where name is the label of the group and value is a 
        list of columns
        OR one of 'Mar', 'Aug', 'Sep', 'Tyr'
    cross [bool] - designates whether to include one panel with a cross of all channels
        Note that cross will not work with group

    """
    # Default column groups for each dataset
    if col_groups is None:
        col_groups = [('all', list(range(data.shape[1])))]
    elif isinstance(col_groups, str):
        if col_groups == 'Aug':
            col_groups = [
                ('Control', ['GFP_A1', 'GFP_A2', 'GFP_B1', 'GFP_B2']),
                ('KO93',
                 ['KO93_A1', 'KO93_A2', 'KO93_B1', 'KO93_B2', 'KO93_B3']),
                ('KO95',
                 ['KO95_A1', 'KO93_A2', 'KO95_A3', 'KO95_B1', 'KO95_B2']),
                ('DKO', ['DKO_A1', 'DKO_A2', 'DKO_B1', 'DKO_B2']),
            ]
        elif col_groups == 'Sep':
            col_groups = [
                ('P25_EE', ['P25_EE_A1', 'P25_EE_A2', 'P25_EE_A3']),
                ('EE', ['CT_EE_A1', 'CT_EE_A2', 'CT_EE_A3']),
                ('P25', ['P25_HC_A1', 'P25_HC_A2']),
                ('Control', ['CT_HC_A1', 'CT_HC_A2']),
            ]

    # Obtain all named columns
    all_cols = sum([g[1] for g in col_groups], [])
    if group:
        count = data.accession_number.groupby(data.accession_number).count()
        aggregated = data[all_cols].groupby(data.accession_number).mean()
        aggregated['n_pep'] = count
    else:
        aggregated = data

    for name, cols in col_groups:
        f = compare_measures(aggregated,
                             cols,
                             title=title,
                             corr=True,
                             count=False)
        f.set_size_inches(10, 10)
        f.tight_layout(rect=(0, 0, 1, 0.95))
        f.savefig('figures/%s_channel_reps_%s.png' % (title, name), dpi=100)

    # TODO separate aggregation code so cross_groups works with agg
    if cross:
        f = compare_measures(aggregated,
                             all_cols,
                             title="ALL",
                             corr=True,
                             count=False)
        f.set_size_inches(2 * len(all_cols), 2 * len(all_cols))
        f.tight_layout(rect=(0, 0, 1, 0.95))
        f.savefig('figures/%s_channel_reps_ALL.png' % (title, ), dpi=100)
Example 19

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Build the data: 100 random points
points_num = 100
# vectors will be filled with the 100 points below
vectors = []
# Use NumPy's normal distribution to generate 100 points whose (x, y)
# coordinates follow the linear equation y = 0.1 * x + 0.2
# (weight = 0.1, bias = 0.2)
for i in range(points_num):
    # x coordinate, drawn from a normal distribution (std 0.66)
    x1 = np.random.normal(0.0, 0.66)
    # add a little noise around the true value
    y1 = 0.1 * x1 + 0.2 + np.random.normal(0.0, 0.04)
    # append the point to the vectors list
    vectors.append([x1, y1])

x_data = [v[0] for v in vectors]  # x coordinates of the generated points
y_data = [v[1] for v in vectors]  # y coordinates of the generated points
Example 20
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                            graph_def=sess.graph_def)

    # And then after everything is built, start the training loop.
    for step in range(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, FLAGS.train_dir, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
Example 21
def selSPEA2(individuals, k):
    """Apply SPEA-II selection operator on the *individuals*. Usually, the
    size of *individuals* will be larger than *n* because any individual
    present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equals to *n* will have no effect other
    than sorting the population according to a strength Pareto scheme. The
    list returned contains references to the input *individuals*. For more
    details on the SPEA-II operator see [Zitzler2001]_.
    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :returns: A list of selected individuals.
    .. [Zitzler2001] Zitzler, Laumanns and Thiele, "SPEA 2: Improving the
       strength Pareto evolutionary algorithm", 2001.
    """
    N = len(individuals)
    L = len(individuals[0].fitness.values)
    K = math.sqrt(N)
    strength_fits = [0] * N
    fits = [0] * N
    dominating_inds = [list() for i in range(N)]

    for i, ind_i in enumerate(individuals):
        for j, ind_j in enumerate(individuals[i+1:], i+1):
            if ind_i.fitness.dominates(ind_j.fitness):
                strength_fits[i] += 1
                dominating_inds[j].append(i)
            elif ind_j.fitness.dominates(ind_i.fitness):
                strength_fits[j] += 1
                dominating_inds[i].append(j)

    for i in range(N):
        for j in dominating_inds[i]:
            fits[i] += strength_fits[j]

    # Choose all non-dominated individuals
    chosen_indices = [i for i in range(N) if fits[i] < 1]

    if len(chosen_indices) < k:     # The archive is too small
        for i in range(N):
            distances = [0.0] * N
            for j in range(i + 1, N):
                dist = 0.0
                for l in range(L):
                    val = individuals[i].fitness.values[l] - \
                          individuals[j].fitness.values[l]
                    dist += val * val
                distances[j] = dist
            kth_dist = _randomizedSelect(distances, 0, N - 1, K)
            density = 1.0 / (kth_dist + 2.0)
            fits[i] += density

        next_indices = [(fits[i], i) for i in range(N)
                        if i not in chosen_indices]
        next_indices.sort()
        #print next_indices
        chosen_indices += [i for _, i in next_indices[:k - len(chosen_indices)]]

    elif len(chosen_indices) > k:   # The archive is too large
        N = len(chosen_indices)
        distances = [[0.0] * N for i in range(N)]
        sorted_indices = [[0] * N for i in range(N)]
        for i in range(N):
            for j in range(i + 1, N):
                dist = 0.0
                for l in range(L):
                    val = individuals[chosen_indices[i]].fitness.values[l] - \
                          individuals[chosen_indices[j]].fitness.values[l]
                    dist += val * val
                distances[i][j] = dist
                distances[j][i] = dist
            distances[i][i] = -1

        # Insert sort is faster than quick sort for short arrays
        for i in range(N):
            for j in range(1, N):
                l = j
                while l > 0 and distances[i][j] < distances[i][sorted_indices[i][l - 1]]:
                    sorted_indices[i][l] = sorted_indices[i][l - 1]
                    l -= 1
                sorted_indices[i][l] = j

        size = N
        to_remove = []
        while size > k:
            # Search for minimal distance
            min_pos = 0
            for i in range(1, N):
                for j in range(1, size):
                    dist_i_sorted_j = distances[i][sorted_indices[i][j]]
                    dist_min_sorted_j = distances[min_pos][sorted_indices[min_pos][j]]

                    if dist_i_sorted_j < dist_min_sorted_j:
                        min_pos = i
                        break
                    elif dist_i_sorted_j > dist_min_sorted_j:
                        break

            # Remove minimal distance from sorted_indices
            for i in range(N):
                distances[i][min_pos] = float("inf")
                distances[min_pos][i] = float("inf")

                for j in range(1, size - 1):
                    if sorted_indices[i][j] == min_pos:
                        sorted_indices[i][j] = sorted_indices[i][j + 1]
                        sorted_indices[i][j + 1] = min_pos

            # Remove corresponding individual from chosen_indices
            to_remove.append(min_pos)
            size -= 1

        for index in reversed(sorted(to_remove)):
            del chosen_indices[index]

    return [individuals[i] for i in chosen_indices]
Example 22
    def kernel(self, cineObj, st , mu, LMBD, gamma, nInner, nBreg):
        self.st['sensemap']=self.st['sensemap']*self.st['mask']
        orig_num_ky=numpy.shape(cineObj.tse)[1]
        tse = cineObj.tse[:, orig_num_ky//2 - self.st['Nd'][0]//2 : orig_num_ky//2 + self.st['Nd'][0]//2, :]
#         tse=cineObj.tse
#        tse=numpy.abs(numpy.mean(self.st['sensemap'],-1))

        tse=CsTransform.pynufft.appendmat(tse,self.st['Nd'][1])
        #tse=Normalize(tse)
        tse=numpy.transpose(tse,(0,1,3,2))
        self.ttse=tse#CsTransform.pynufft.Normalize(tse)
        
        self.tse0 = CsTransform.pynufft.CombineMulti(tse, -1)
        print('line392, shape self.tse0',numpy.shape(self.tse0))
        self.filter= numpy.ones(tse.shape)
        dpss = numpy.kaiser(tse.shape[1], 1.0)*10.0
        for ppp in range(0,tse.shape[1]):
            self.filter[:,ppp,:,:]=self.filter[:,ppp,:,:]*dpss[ppp]
            
        
        
        print('tse.shape',tse.shape)
#        L= numpy.size(f)/st['M'] 
#        image_dim=st['Nd']+(L,)
#         
#        if numpy.ndim(f) == 1:# preventing row vector
#            f=numpy.reshape(f,(numpy.shape(f)[0],1),order='F')
#        f0 = numpy.copy(f) # deep copy to prevent scope f0 to f
##        u = numpy.zeros(image_dim,dtype=numpy.complex64)
        f0=numpy.copy(cineObj.f)
        f=numpy.copy(cineObj.f)

#        u0=self.data2rho(f_internal,  
#                         cineObj.dim_x,
#                         self.st['Nd'][0],
#                         self.st['Nd'][1],
#                         cineObj.ncoils,
#                         self.CsTransform
#                         ) # doing spatial transform
        u0 = self.fun1(cineObj)
        
        pdf = cineObj.pdf
        pdf = CsTransform.pynufft.appendmat(pdf,self.st['Nd'][1])
        pdf = numpy.transpose(pdf,(0,1,3,2))
        
#        u0 = fftpack.fftn(u0,axes=(1,))
#        u0 = fftpack.fftshift(u0,axes=(1,))
#        #u0[:,:,u0.shape[2]/2,:] = u0[:,:,u0.shape[2]/2,:]/pdf[:,:,u0.shape[2]/2,:]
#        u0 = u0#/pdf
#        u0 = fftpack.ifftshift(u0,axes=(1,))
#        u0 = fftpack.ifftn(u0,axes=(1,))     
        
#        print('cineObj.pdf.shape',cineObj.pdf.shape)
#        for pj in range(0,4):
#            matplotlib.pyplot.imshow(cineObj.pdf[:,:,pj].real)
#            matplotlib.pyplot.show()
        
        u0=self.fun2(u0)
        
        u0=self.fun3(u0)
        
        u0 = u0*self.st['sensemap'].conj()
        
        u0 = CsTransform.pynufft.CombineMulti(u0,-1)
        print('line443, shape u0',numpy.shape(u0))
        #u0 = u0*self.filter 
        
        uker = self.create_laplacian_kernel(cineObj)
        uker = CsTransform.pynufft.appendmat(uker,u0.shape[3])

        
        self.u0 = u0
        
        u = numpy.copy(self.tse0)
        
       
        print('u0.shape',u0.shape)

        (xx,bb,dd)=self.make_split_variables(u)        

        uf = numpy.copy(u)  # only used for ISRA, written here for generality 
          
        murf = numpy.copy(u) # initial values 
#    #===============================================================================
        #u_stack = numpy.empty(st['Nd']+(nBreg,),dtype=numpy.complex)
        for outer in range(0, nBreg):
            for inner in range(0, nInner):
                # update u
                print('iterating',[inner,outer])
                #===============================================================
#                 update u  # simple k-space deconvolution to guess initial u
                u = self.update_u(murf, u, uker, xx, bb,cineObj)
                
                c = numpy.max(numpy.abs(u[:])) # Rough coefficient
                # to correct threshold of nonlinear shrink
                
            #===================================================================
            # # update d
            #===================================================================
            #===================================================================
            # Shrinkage: remove tiny values "in somewhere sparse!"
            # dx+bx should be sparse! 
            #===================================================================
            # shrinkage 
            #===================================================================
                dd=self.update_d(u,dd)

                xx=self.shrink( dd, bb, c*1.0/LMBD/numpy.sqrt(numpy.prod(st['Nd'])))
                
                #===============================================================
            #===================================================================
            # # update b
            #===================================================================

                bb=self._update_b(bb, dd, xx)

            if outer < (nBreg-1): # do not update in the last loop
                (f, uf, murf,u)=self.external_update(u, f, uf, f0, u0) # update outer Split_bregman


#         u = CsTransform.pynufft.Normalize(u)
#         for pp in range(0,u0.shape[2]):
#             matplotlib.pyplot.subplot(numpy.sqrt(u0.shape[2])+1,numpy.sqrt(u0.shape[2])+1,pp)
#             matplotlib.pyplot.imshow(numpy.sum(numpy.abs(u[...,pp,:]),-1),norm=norm,interpolation='nearest')
#         matplotlib.pyplot.show()
#        

        
        return (u,uf)
Example 23
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 18:08:05 2018

@author: osboxes
"""

import numpy as np
import cv2

img1 = cv2.imread('images/input/messi5.jpg')

e1 = cv2.getTickCount()
for i in range(5, 49, 2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print(t)

# Result I got is 0.521107655 seconds
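The same timing can also be done with the standard library instead of OpenCV's tick counter; a small illustrative variant:

import time

t0 = time.perf_counter()
for i in range(5, 49, 2):
    img1 = cv2.medianBlur(img1, i)
print(time.perf_counter() - t0)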
Example 24
 def vibrate(self, length):
     if length in range(1, 4):
         # first byte tells it to vibrate; purpose of second byte is unknown
         self.write_attr(0x19, pack('3B', 3, 1, length))
Example 25
def ellipse_overlap_fast(f1, f2, options):
    opts = {}
    opts['normaliseFrames'] = True
    opts['normalisedScale'] = 30
    opts['minAreaRatio'] = 0.3
    opts['frame2frame'] = False
    opts['fix'] = False
    parse_arg(opts, options)

    N2 = f2.shape[0]

    ellipsePairs = []
    scores = []

    if f1.shape[0] == 0 or f2.shape[0] == 0:
        return ellipsePairs, scores

    f1 = frame2ellipse(f1)
    f2 = frame2ellipse(f2)

    e1, eigVec1 = ellipse_eigen(f1)
    e2, eigVec2 = ellipse_eigen(f2)

    vggEll1 = ellipse2vggformat(f1, e1, eigVec1)
    vggEll2 = ellipse2vggformat(f2, e2, eigVec2)

    a1 = np.pi * np.sqrt(np.prod(e1, axis=1))
    a2 = np.pi * np.sqrt(np.prod(e2, axis=1))

    for i2 in range(N2):
        if opts['normaliseFrames']:
            s = opts['normalisedScale'] / np.sqrt(a2[i2] / np.pi)
        else:
            s = 1

        if opts['frame2frame']:
            # pair frame i2 with every frame in f1 (mirrors the branch below)
            pairs = np.arange(f1.shape[0])
            ellipsePairs.extend(zip([i2] * pairs.shape[0], pairs.tolist()))
        else:
            thr = 4 * np.sqrt(a2[i2] / np.pi)
            if opts['fix']:
                thr = thr * s
            canOverlap = scipy.spatial.distance.cdist(
                f2[[i2], 0:2], f1[:, 0:2], 'euclidean') < thr
            maxOverlap = np.minimum(a2[i2], a1) / \
                np.maximum(a2[i2], a1) * canOverlap
            _, pairs = np.where(maxOverlap > opts['minAreaRatio'])
            ellipsePairs.extend(zip([i2] * pairs.shape[0], pairs.tolist()))
        if len(pairs) == 0:
            continue

        if opts['normaliseFrames']:
            vggS = np.array([1, 1, 1 / s**2, 1 / s**2, 1 / s**2, s, s, s, s])
            lhsEllipse = vggS * vggEll2[[i2]]
            rhsEllipse = vggEll1[pairs] * vggS
        else:
            lhsEllipse = vggEll2[[i2]]
            rhsEllipse = vggEll1[pairs]
        _, tw, _, _ = bench.vgg_compute_ellipse_overlap.vgg_compute_ellipse_overlap(
            lhsEllipse, rhsEllipse, -1)
        scores.extend((1 - tw / 100).tolist()[0])
    return np.array(ellipsePairs), np.array(scores)
Example 26
def update(row,trainLabel):
    global w, b
    for i in range(len(row)):
        w[i] += trainLabel * row[i]
    b += trainLabel
Example 27
def chain(cosmo, data, command_line):
    """
    Run a Markov chain of fixed length with a Metropolis Hastings algorithm.

    Main function of this module, this is the actual Markov chain procedure.
    After having selected a starting point in parameter space defining the
    first **last accepted** one, it will, for a given amount of steps :

    + choose randomly a new point following the *proposal density*,
    + compute the cosmological *observables* through the cosmological module,
    + compute the value of the *likelihoods* of the desired experiments at this
      point,
    + *accept/reject* this point given its likelihood compared to the one of
      the last accepted one.

    Every time the code accepts :code:`data.write_step` number of points
    (quantity defined in the input parameter file), it will write the result to
    disk (flushing the buffer by forcing to exit the output file, and reopening
    it again).

    .. note::

        to use the code to set a fiducial file for certain fixed parameters,
        there are two solutions. The first one is to set all input 1-sigma
        proposal densities to zero (this method still works, but is not
        recommended anymore). The second one consists in using the flag "-f 0",
        to force a step of zero amplitude.

    """

    ## Initialisation
    loglike = 0

    # In case command_line.silent has been asked, outputs should only contain
    # data.out. Otherwise, it will also contain sys.stdout
    outputs = [data.out]
    if not command_line.silent:
        outputs.append(sys.stdout)

    use_mpi = False
    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        # suppress duplicate output from slaves
        if rank:
            command_line.quiet = True
        use_mpi = True
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Initialise master and slave chains for superupdate.
    # Workaround in order to have one master chain and several slave chains even when
    # communication fails between MPI chains. It could malfunction on some hardware.
    # TODO: Would like to merge with MPI initialization above and make robust and logical
    # TODO: Or if keeping current scheme, store value and delete jumping_factor.txt
    # TODO: automatically if --parallel-chains is enabled
    if command_line.superupdate and data.jumping_factor:
        try:
            jump_file = open(command_line.folder + '/jumping_factor.txt','r')
            #if command_line.restart is None:
            if not use_mpi and command_line.parallel_chains:
                rank = 1
                warnings.warn('MPI not in use, flag --parallel-chains enabled, '
                              'superupdate enabled, and a jumping_factor.txt file detected. '
                              'If relaunching in the same folder or restarting a run this '
                              'will cause all chains to be assigned as slaves. In this case '
                              'instead note the value in jumping_factor.txt, delete the '
                              'file, and pass the value with flag -f <value>. This warning '
                              'may then appear again, but you can safely disregard it.')
            else:
                # For restart runs we want to save the input jumping factor
                # as starting jumping factor, but continue from the jumping
                # factor stored in the file.
                starting_jumping_factor = data.jumping_factor
                # This will load the value irrespective of whether it starts
                # with # (i.e. the jumping factor adaptation was started) or not.
                jump_value = jump_file.read().replace('# ','')
                data.jumping_factor = float(jump_value)
            jump_file.close()
            print('rank = ',rank)
        except:
            jump_file = open(command_line.folder + '/jumping_factor.txt','w')
            jump_file.write(str(data.jumping_factor))
            jump_file.close()
            rank = 0
            print('rank = ',rank)
            starting_jumping_factor = data.jumping_factor

    # Recover the covariance matrix according to the input, if the varying set
    # of parameters is non-zero
    if (data.get_mcmc_parameters(['varying']) != []):

        # Read input covariance matrix
        sigma_eig, U, C = sampler.get_covariance_matrix(cosmo, data, command_line)

        # if we want to compute the starting point by minimising lnL (instead of taking it from input file or bestfit file)
        minimum = 0
        if command_line.minimize:
            minimum = sampler.get_minimum(cosmo, data, command_line, C)
            parameter_names = data.get_mcmc_parameters(['last_accepted'])
            for index, elem in enumerate(parameter_names):
                data.mcmc_parameters[elem]['last_accepted'] = minimum[index]

        # if we want to compute Fisher matrix and then stop
        if command_line.fisher:
            sampler.get_fisher_matrix(cosmo, data, command_line, C, minimum)
            return

        # warning if no jumps are requested
        if data.jumping_factor == 0:
            warnings.warn(
                "The jumping factor has been set to 0. The above covariance " +
                "matrix will not be used.")

    # In case of a fiducial run (all parameters fixed), simply run once and
    # print out the likelihood. This should not be used any more (one has to
    # modify the log.param, which is never a good idea. Instead, force the code
    # to use a jumping factor of 0 with the option "-f 0".
    else:
        warnings.warn(
            "You are running with no varying parameters... I will compute " +
            "only one point and exit")
        data.update_cosmo_arguments()  # this fills in the fixed parameters
        loglike = sampler.compute_lkl(cosmo, data)
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    # In the fast-slow method, one need the Cholesky decomposition of the
    # covariance matrix. Return the Cholesky decomposition as a lower
    # triangular matrix
    Cholesky = None
    Rotation = None
    if command_line.jumping == 'fast':
        Cholesky = la.cholesky(C).T
        Rotation = np.identity(len(sigma_eig))

    # define path and covmat
    input_covmat = command_line.cov
    base = os.path.basename(command_line.folder)
    # the previous line fails when "folder" is a string ending with a slash. This issue is cured by the next lines:
    if base == '':
        base = os.path.basename(command_line.folder[:-1])
    command_line.cov = os.path.join(
        command_line.folder, base+'.covmat')

    # Fast Parameter Multiplier (fpm) for adjusting update and superupdate numbers.
    # This is equal to N_slow + f_fast N_fast, where N_slow is the number of slow
    # parameters, f_fast is the over-sampling number for each fast block, and N_fast
    # is the number of parameters in each fast block.
    for i in range(len(data.block_parameters)):
        if i == 0:
            fpm = data.over_sampling[i]*data.block_parameters[i]
        else:
            fpm += data.over_sampling[i]*(data.block_parameters[i] - data.block_parameters[i-1])

    # If the update mode was selected, the previous (or original) matrix should be stored
    if command_line.update:
        if not rank and not command_line.silent:
            print('Update routine is enabled with value %d (recommended: 50)' % command_line.update)
            print('This number is rescaled by cycle length %d (N_slow + f_fast * N_fast) to %d' % (fpm,fpm*command_line.update))
        # Rescale update number by cycle length N_slow + f_fast * N_fast to account for fast parameters
        command_line.update *= fpm
        previous = (sigma_eig, U, C, Cholesky)

    # Initialise adaptive
    if command_line.adaptive:
        if not command_line.silent:
            print('Adaptive routine is enabled with value %d (recommended: 10*dimension)' % command_line.adaptive)
            print('and adaptive_ts = %d (recommended: 100*dimension)' % command_line.adaptive_ts)
            print('Please note: current implementation not suitable for multiple chains')
        if rank > 0:
            raise io_mp.ConfigurationError('Adaptive routine not compatible with MPI')
        if command_line.update:
            warnings.warn('Adaptive routine not compatible with update, overwriting input update value')
        if command_line.superupdate:
            warnings.warn('Adaptive routine not compatible with superupdate, deactivating superupdate')
            command_line.superupdate = 0
        # Define needed parameters
        parameter_names = data.get_mcmc_parameters(['varying'])
        mean = np.zeros(len(parameter_names))
        last_accepted = np.zeros(len(parameter_names),'float64')
        ar = np.zeros(100)
        if command_line.cov == None:
            # If no input covmat was given, the starting jumping factor
            # should be very small until a covmat is obtained and the
            # original start jumping factor should be saved
            start_jumping_factor = command_line.jumping_factor
            data.jumping_factor = command_line.jumping_factor/100.
            # Analyze module will be forced to compute one covmat,
            # after which update flag will be set to False.
            command_line.update = command_line.adaptive
        else:
            # If an input covmat was provided, take mean values from param file
            # Question: is it better to always do this, rather than setting mean
            # to last accepted after the initial update run?
            for elem in parameter_names:
                mean[parameter_names.index(elem)] = data.mcmc_parameters[elem]['initial'][0]

    # Initialize superupdate
    if command_line.superupdate:
        if not rank and not command_line.silent:
            print('Superupdate routine is enabled with value %d (recommended: 20)' % command_line.superupdate)
            if command_line.superupdate < 20:
                warnings.warn('Superupdate value lower than the recommended value. This '
                              'may increase the risk of poorly converged acceptance rate')
            print('This number is rescaled by cycle length %d (N_slow + f_fast * N_fast) to %d' % (fpm,fpm*command_line.superupdate))
        # Rescale superupdate number by cycle length N_slow + f_fast * N_fast to account for fast parameters
        command_line.superupdate *= fpm
        # Define needed parameters
        parameter_names = data.get_mcmc_parameters(['varying'])
        updated_steps = 0
        stop_c = False
        jumping_factor_rescale = 0
        if command_line.restart:
            try:
                jump_file = open(command_line.cov,'r')
                jumping_factor_rescale = 1
            except:
                jumping_factor_rescale = 0
        c_array = np.zeros(command_line.superupdate) # Allows computation of mean of jumping factor
        R_minus_one = np.array([100.,100.]) # 100 to make sure max(R-1) value is high if computation failed
        # Local acceptance rate of last SU*(N_slow + f_fast * N_fast) steps
        ar = np.zeros(command_line.superupdate)
        # Store acceptance rate of last 5*SU*(N_slow + f_fast * N_fast) steps
        backup_ar = np.zeros(5*command_line.superupdate)
        # Make sure update is enabled
        if command_line.update == 0:
            if not rank and not command_line.silent:
                print('Update routine required by superupdate. Setting --update 50')
                print('This number is then rescaled by cycle length: %d (N_slow + f_fast * N_fast)' % fpm)
            command_line.update = 50 * fpm
            previous = (sigma_eig, U, C, Cholesky)

    # If restart wanted, pick initial value for arguments
    if command_line.restart is not None:
        sampler.read_args_from_chain(data, command_line.restart)

    # If restart from best fit file, read first point (overwrite settings of
    # read_args_from_chain)
    if command_line.bf is not None and not command_line.minimize:
        sampler.read_args_from_bestfit(data, command_line.bf)

    # Pick a position (from last accepted point if restart, from the mean value
    # else), with a 100 tries.
    for i in range(100):
        if get_new_position(data, sigma_eig, U, i,
                            Cholesky, Rotation) is True:
            break
        if i == 99:
            raise io_mp.ConfigurationError(
                "You should probably check your prior boundaries... because " +
                "no valid starting position was found after 100 tries")

    # Compute the starting Likelihood
    loglike = sampler.compute_lkl(cosmo, data)

    # Choose this step as the last accepted value
    # (accept_step), and modify accordingly the max_loglike
    sampler.accept_step(data)
    max_loglike = loglike

    # If the jumping factor is 0, the likelihood associated with this point is
    # displayed, and the code exits.
    if data.jumping_factor == 0:
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    acc, rej = 0.0, 0.0  # acceptance and rejection number count
    N = 1   # number of time the system stayed in the current position

    # Print on screen the computed parameters
    if not command_line.silent and not command_line.quiet:
        io_mp.print_parameters(sys.stdout, data)

    # Suppress non-informative output after initializing
    command_line.quiet = True

    k = 1
    # Main loop, that goes on while the maximum number of failure is not
    # reached, and while the expected amount of steps (N) is not taken.
    while k <= command_line.N:
        # If the number of steps reaches the number set in the adaptive method plus one,
        # then the proposal distribution should be gradually adapted.
        # If the number of steps also exceeds the number set in adaptive_ts,
        # the jumping factor should be gradually adapted.
        if command_line.adaptive and k>command_line.adaptive+1:
            # Start of adaptive routine
            # By B. Schroer and T. Brinckmann
            # Modified version of the method outlined in the PhD thesis of Marta Spinelli

            # Store last accepted step
            for elem in parameter_names:
                last_accepted[parameter_names.index(elem)] = data.mcmc_parameters[elem]['last_accepted']
            # Recursion formula for mean and covmat (and jumping factor after ts steps)
            # mean(k) = mean(k-1) + (last_accepted - mean(k-1))/k
            mean += 1./k*(last_accepted-mean)
            # C(k) = C(k-1) + [(last_accepted - mean(k))^T * (last_accepted - mean(k)) - C(k-1)]/k
            C +=1./k*(np.dot(np.transpose(np.asmatrix(last_accepted-mean)),np.asmatrix(last_accepted-mean))-C)
            sigma_eig, U = np.linalg.eig(np.linalg.inv(C))
            if command_line.jumping == 'fast':
                Cholesky = la.cholesky(C).T
            if k>command_line.adaptive_ts:
                # c = j^2/d
                c = data.jumping_factor**2/len(parameter_names)
                # c(k) = c(k-1) + [acceptance_rate(last 100 steps) - 0.25]/k
                c +=(np.mean(ar)-0.25)/k
                data.jumping_factor = np.sqrt(len(parameter_names)*c)

            # Save the covariance matrix and the jumping factor in a file
            # For a possible MPI implementation
            #if not (k-command_line.adaptive) % 5:
            #    io_mp.write_covariance_matrix(C,parameter_names,str(command_line.cov))
            #    jump_file = open(command_line.folder + '/jumping_factor.txt','w')
            #    jump_file.write(str(data.jumping_factor))
            #    jump_file.close()
            # End of adaptive routine

        # If the number of steps reaches the number set in the update method,
        # then the proposal distribution should be adapted.
        if command_line.update:
            # Start of update routine
            # By M. Ballardini and T. Brinckmann
            # Also used by superupdate and adaptive

    # master chain behavior
            if not rank:
                # Add the folder to the list of files to analyze, and switch on the
                # options for computing only the covmat
                from parser_mp import parse
                info_command_line = parse(
                    'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat' % command_line.folder)
                info_command_line.update = command_line.update

                if command_line.adaptive:
                    # Keep all points for covmat guess in adaptive
                    info_command_line = parse('info %s --minimal --noplot --keep-non-markovian --want-covmat' % command_line.folder)
                    # Tell the analysis to update the covmat after t0 steps if it is adaptive
                    info_command_line.adaptive = command_line.adaptive
                    # Only compute covmat if no input covmat was provided
                    if input_covmat != None:
                        info_command_line.want_covmat = False

                # This is in order to allow for more frequent R-1 computation with superupdate
                compute_R_minus_one = False
                if command_line.superupdate:
                    if not (k+10) % command_line.superupdate:
                        compute_R_minus_one = True
                # the +10 below is here to ensure that the first master update will take place before the first slave updates,
                # but this is a detail, the code is robust against situations where updating is not possible, so +10 could be omitted
                if (not (k+10) % command_line.update or compute_R_minus_one) and k > 10:
                    # Try to launch an analyze (computing a new covmat if successful)
                    try:
                        if not (k+10) % command_line.update:
                            from analyze import analyze
                            R_minus_one = analyze(info_command_line)
                        elif command_line.superupdate:
                            # Compute (only, i.e. no covmat) R-1 more often when using superupdate
                            info_command_line = parse(
                                'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian' % command_line.folder)
                            info_command_line.update = command_line.update
                            R_minus_one = analyze(info_command_line)
                    except:
                        if not command_line.silent:
                            print('Step ',k,' chain ', rank,': Failed to calculate covariance matrix')

                if command_line.superupdate:
                    # Start of superupdate routine
                    # By B. Schroer and T. Brinckmann

                    c_array[(k-1)%(command_line.superupdate)] = data.jumping_factor

                    # If acceptance rate deviates too much from the target acceptance
                    # rate we want to resume adapting the jumping factor
                    # T. Brinckmann 02/2019: use mean a.r. over the last 5*len(ar) steps
                    # instead of over just the last len(ar), which is more stable
                    if abs(np.mean(backup_ar) - command_line.superupdate_ar) > 5.*command_line.superupdate_ar_tol:
                        stop_c = False

                    # Start adapting the jumping factor after command_line.superupdate steps if R-1 < 10
                    # The lower R-1 criterium is an arbitrary choice to keep from updating when the R-1
                    # calculation fails (i.e. returns only zeros).
                    if (k > updated_steps + command_line.superupdate) and 0.01 < max(R_minus_one) < 10. and not stop_c:
                        c = data.jumping_factor**2/len(parameter_names)
                        # To avoid getting trapped in local minima, the jumping factor should
                        # not go below 0.1 (arbitrary) times the starting jumping factor.
                        if (c + (np.mean(ar) - command_line.superupdate_ar)/(k - updated_steps)) > (0.1*starting_jumping_factor)**2./len(parameter_names) or ((np.mean(ar) - command_line.superupdate_ar)/(k - updated_steps) > 0):
                            c += (np.mean(ar) - command_line.superupdate_ar)/(k - updated_steps)
                            data.jumping_factor = np.sqrt(len(parameter_names) * c)

                        if not (k-1) % 5:
                            # Check if the jumping factor adaptation should stop.
                            # An acceptance rate of 25% balances the wish for more accepted
                            # points, while ensuring the parameter space is properly sampled.
                            # The convergence criterion is by default (26+/-1)%, so the adaptation
                            # will stop when the code reaches an acceptance rate of at least 25%.
                            # T. Brinckmann 02/2019: use mean a.r. over the last 5*len(ar) steps
                            # instead of over just the last len(ar), which is more stable
                            if (max(R_minus_one) < 0.4) and (abs(np.mean(backup_ar) - command_line.superupdate_ar) < command_line.superupdate_ar_tol) and (abs(np.mean(c_array)/c_array[(k-1) % (command_line.superupdate)] - 1) < 0.01):
                                stop_c = True
                                data.out.write('# After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate %f \n' % (int(acc),data.jumping_factor,np.mean(backup_ar)))
                                if not command_line.silent:
                                    print('After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate of %f \n' % (int(acc), data.jumping_factor,np.mean(backup_ar)))
                                jump_file = open(command_line.folder + '/jumping_factor.txt','w')
                                jump_file.write('# '+str(data.jumping_factor))
                                jump_file.close()
                            else:
                                jump_file = open(command_line.folder + '/jumping_factor.txt','w')
                                jump_file.write(str(data.jumping_factor))
                                jump_file.close()

                    # Write the evolution of the jumping factor to a file
                    if not k % (command_line.superupdate):
                        jump_file = open(command_line.folder + '/jumping_factors.txt','a')
                        for i in range(command_line.superupdate):
                            jump_file.write(str(c_array[i])+'\n')
                        jump_file.close()
                    # End of main part of superupdate routine

                if not (k-1) % (command_line.update/3):
                    try:
                        # Read the covmat
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0,0] == previous[2][0,0]:
                            if k == 1:
                                if not command_line.silent:
                                    if input_covmat is not None:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s instead of %s. '
                                            'If new input covmat is desired, please delete previous covmat.'
                                            % (command_line.cov, input_covmat))
                                    else:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s. '
                                            'If no starting covmat is desired, please delete previous covmat.'
                                            % command_line.cov)
                            else:
                                # Start of second part of superupdate routine
                                if command_line.superupdate:
                                    # Adaptation of jumping factor should start again after the covmat is updated
                                    # Save the step number after it updated for superupdate and start adaption of c again
                                    updated_steps = k
                                    stop_c = False
                                    cov_det = np.linalg.det(C)
                                    prev_cov_det = np.linalg.det(previous[2])
                                    # Rescale jumping factor in order to keep the magnitude of the jumps the same.
                                    # Skip this update the first time the covmat is updated in order to prevent
                                    # problems due to a poor initial covmat. Rescale the jumping factor after the
                                    # first calculated covmat to the expected optimal one of 2.4.
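                                    # (prev_cov_det/cov_det)**(1./(2*len(parameter_names))) is the ratio of the
                                    # geometric-mean proposal widths of the old and new covmats, so the rescaling
                                    # below keeps the typical jump size roughly constant across the update.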
                                    if jumping_factor_rescale:
                                        new_jumping_factor = data.jumping_factor * (prev_cov_det/cov_det)**(1./(2 * len(parameter_names)))
                                        data.out.write('# After %d accepted steps: rescaled jumping factor from %f to %f, due to updated covariance matrix \n' % (int(acc), data.jumping_factor, new_jumping_factor))
                                        if not command_line.silent:
                                            print('After %d accepted steps: rescaled jumping factor from %f to %f, due to updated covariance matrix \n' % (int(acc), data.jumping_factor, new_jumping_factor))
                                        data.jumping_factor = new_jumping_factor
                                    else:
                                        data.jumping_factor = starting_jumping_factor
                                    jumping_factor_rescale += 1
                                # End of second part of superupdate routine

                                # Write to chains file when the covmat was updated
                                data.out.write('# After %d accepted steps: update proposal with max(R-1) = %f and jumping factor = %f \n' % (int(acc), max(R_minus_one), data.jumping_factor))
                                if not command_line.silent:
                                    print('After %d accepted steps: update proposal with max(R-1) = %f and jumping factor = %f \n' % (int(acc), max(R_minus_one), data.jumping_factor))
                                try:
                                    if stop_after_update:
                                        k = command_line.N
                                        print('Covariance matrix updated - stopping run')
                                except:
                                    pass

                            previous = (sigma_eig, U, C, Cholesky)
                    except:
                        pass

                    command_line.quiet = True

                # Start of second part of adaptive routine
                # Stop updating the covmat after t0 steps in adaptive mode
                if command_line.adaptive and k > 1:
                    command_line.update = 0
                    data.jumping_factor = starting_jumping_factor
                    # Test if there are still enough steps left before the adaption of the jumping factor starts
                    if k > 0.5*command_line.adaptive_ts:
                        command_line.adaptive_ts += k
                    # Set the mean for the recursion formula to the last accepted point
                    for elem in parameter_names:
                        mean[parameter_names.index(elem)] = data.mcmc_parameters[elem]['last_accepted']
                    # End of second part of adaptive routine

            # slave chain behavior
            else:
                # Start of slave superupdate routine
                if command_line.superupdate:
                    # If acceptance rate deviates too much from the target acceptance
                    # rate we want to resume adapting the jumping factor. This line
                    # will force the slave chains to check if the jumping factor
                    # has been updated
                    if abs(np.mean(backup_ar) - command_line.superupdate_ar) > 5.*command_line.superupdate_ar_tol:
                        stop_c = False

                    # Update the jumping factor every 5 steps in superupdate
                    if not k % 5 and k > command_line.superupdate and command_line.superupdate and (not stop_c or (stop_c and k % command_line.update)):
                        try:
                            jump_file = open(command_line.folder + '/jumping_factor.txt','r')
                            # If there is a # in the file, the master has stopped adapting c
                            for line in jump_file:
                                if line.find('#') == -1:
                                    jump_file.seek(0)
                                    jump_value = jump_file.read()
                                    data.jumping_factor = float(jump_value)
                                else:
                                    jump_file.seek(0)
                                    jump_value = jump_file.read().replace('# ','')
                                    #if not stop_c or (stop_c and not float(jump_value) == data.jumping_factor):
                                    if not float(jump_value) == data.jumping_factor:
                                        data.jumping_factor = float(jump_value)
                                        stop_c = True
                                        data.out.write('# After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate %f \n' % (int(acc),data.jumping_factor,np.mean(backup_ar)))
                                        if not command_line.silent:
                                            print('After %d accepted steps: stop adapting the jumping factor at a value of %f with a local acceptance rate of %f \n' % (int(acc), data.jumping_factor,np.mean(backup_ar)))
                            jump_file.close()
                        except:
                            if not command_line.silent:
                                print('Reading jumping_factor file failed')
                            pass
                # End of slave superupdate routine

                # Start of slave update routine
                if not (k-1) % (command_line.update/10):
                    try:
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0,0] == previous[2][0,0] and not k == 1:
                            if command_line.superupdate:
                                # If the covmat was updated, the master has resumed adapting c
                                stop_c = False
                            data.out.write('# After %d accepted steps: update proposal \n' % int(acc))
                            if not command_line.silent:
                                print('After %d accepted steps: update proposal \n' % int(acc))
                            try:
                                if stop_after_update:
                                    k = command_line.N
                                    print('Covariance matrix updated - stopping run')
                            except:
                                pass
                        previous = (sigma_eig, U, C, Cholesky)
                        
                    except:
                        pass
                    
                    # End of slave update routine
            # End of update routine
            
        # Pick a new position ('current' flag in mcmc_parameters), and compute
        # its likelihood. If get_new_position returns True, it means it did not
        # encounter any boundary problem. Otherwise, just increase the
        # multiplicity of the point and start the loop again
        
        if get_new_position(
                data, sigma_eig, U, k, Cholesky, Rotation) is True:
            newloglike = sampler.compute_lkl(cosmo, data)
        else:  # reject step
            rej += 1
            if command_line.superupdate:
                ar[k%len(ar)] = 0 # Local acceptance rate of last SU*(N_slow + f_fast * N_fast) steps
            elif command_line.adaptive:
                ar[k%len(ar)] = 0 # Local acceptance rate of last 100 steps
            N += 1
            k += 1
            continue

        # Harmless trick to avoid exponentiating large numbers. This decides
        # whether or not the system should move.
        if (newloglike != data.boundary_loglike):
            if (newloglike >= loglike):
                alpha = 1.
            else:
                alpha = np.exp(newloglike-loglike)
        else:
            alpha = -1
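            # a negative alpha can never exceed the uniform draw below, so boundary-violating points are always rejected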

        if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)):  # accept step

            # Print out the last accepted step (WARNING: this is NOT the one we
            # just computed ('current' flag), but really the previous one.)
            # with its proper multiplicity (number of times the system stayed
            # there).
            io_mp.print_vector(outputs, N, loglike, data)

            # Report the 'current' point to the 'last_accepted'
            sampler.accept_step(data)
            loglike = newloglike
            if loglike > max_loglike:
                max_loglike = loglike
            acc += 1.0
            N = 1  # Reset the multiplicity
            if command_line.superupdate:
                ar[k%len(ar)] = 1 # Local acceptance rate of last SU*(N_slow + f_fast * N_fast) steps
            elif command_line.adaptive:
                ar[k%len(ar)] = 1 # Local acceptance rate of last 100 steps
        else:  # reject step
            rej += 1.0
            N += 1  # Increase multiplicity of last accepted point
            if command_line.superupdate:
                ar[k%len(ar)] = 0 # Local acceptance rate of last SU*(N_slow + f_fast * N_fast) steps
            elif command_line.adaptive:
                ar[k%len(ar)] = 0 # Local acceptance rate of last 100 steps

        # Store a.r. for last 5 x SU*(N_slow + f_fast * N_fast) steps
        if command_line.superupdate:
            backup_ar[k%len(backup_ar)] = ar[k%len(ar)]

        # Regularly (option to set in parameter file), close and reopen the
        # buffer to force to write on file.
        if acc % data.write_step == 0:
            io_mp.refresh_file(data)
            # Update the outputs list
            outputs[0] = data.out
        k += 1  # One iteration done
    # END OF WHILE LOOP

    # If at this moment, the multiplicity is higher than 1, it means the
    # current point is not yet accepted, but it also mean that we did not print
    # out the last_accepted one yet. So we do.
    if N > 1:
        io_mp.print_vector(outputs, N-1, loglike, data)

    # Print out some information on the finished chain
    rate = acc / (acc + rej)
    sys.stdout.write('\n#  {0} steps done, acceptance rate: {1}\n'.
                     format(command_line.N, rate))

    # In case the acceptance rate is too low, or too high, print a warning
    if rate < 0.05:
        warnings.warn("The acceptance rate is below 0.05. You might want to "
                      "set the jumping factor to a lower value than the "
                      "default (2.4), with the option `-f 1.5` for instance.")
    elif rate > 0.6:
        warnings.warn("The acceptance rate is above 0.6, which means you might"
                      " have difficulties exploring the entire parameter space"
                      ". Try analysing these chains, and use the output "
                      "covariance matrix to decrease the acceptance rate to a "
                      "value between 0.2 and 0.4 (roughly).")
    # For a restart, erase the starting point to keep only the new, longer
    # chain.
    if command_line.restart is not None:
        os.remove(command_line.restart)
        sys.stdout.write('    deleting starting point of the chain {0}\n'.
                         format(command_line.restart))

    return
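
The accept/reject block above is the textbook Metropolis rule written with log-likelihoods. The following is a minimal, self-contained sketch of just that core (hypothetical helper names, independent of MontePython's data/command_line objects); it reproduces the alpha computation, the boundary rejection via alpha = -1, and the multiplicity counter N:

import numpy as np

def metropolis_sketch(log_lkl, propose, x0, n_steps, boundary_loglike=-1e30, seed=0):
    """Minimal Metropolis accept/reject loop mirroring the chain above."""
    rng = np.random.default_rng(seed)
    x, loglike = x0, log_lkl(x0)
    chain = []
    N = 1                                   # multiplicity of the last accepted point
    for _ in range(n_steps):
        x_new = propose(x, rng)
        newloglike = log_lkl(x_new)
        if newloglike != boundary_loglike:
            # same trick as above: never exponentiate a positive number
            alpha = 1. if newloglike >= loglike else np.exp(newloglike - loglike)
        else:
            alpha = -1.                     # boundary hit -> always rejected below
        if alpha == 1. or rng.uniform() < alpha:
            chain.append((N, loglike, x))   # flush the previous point with its multiplicity
            x, loglike, N = x_new, newloglike, 1
        else:
            N += 1                          # stay put, increase multiplicity
    chain.append((N, loglike, x))           # flush the final point
    return chain

# e.g. a 1-D standard normal target with a Gaussian proposal of width 2.4:
# metropolis_sketch(lambda x: -0.5 * x**2, lambda x, rng: x + rng.normal(0., 2.4), 0., 1000)
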
Ejemplo n.º 28
0
    def _make_sense(self,u0):
        st=self.st
        L=numpy.shape(u0)[-1]
        u0dims= numpy.ndim(u0)
        print('in make_sense, u0.shape',u0.shape)
        if u0dims-1 >0:
            rows=numpy.shape(u0)[0]
#             dpss_rows = numpy.kaiser(rows, 100)     
#             dpss_rows = numpy.fft.fftshift(dpss_rows)
#             dpss_rows[3:-3] = 0.0
            dpss_rows = numpy.ones(rows) 
            # use an all-pass window here instead of the Kaiser filter above,
            # because smoothing along the frequency-encode direction is not necessary
            dpss_fil = dpss_rows
            print('dpss shape',dpss_fil.shape)
        if u0dims-1 > 1:
                               
            cols=numpy.shape(u0)[1]
            dpss_cols = numpy.kaiser(cols, 100)            
            dpss_cols = numpy.fft.fftshift(dpss_cols)
            dpss_cols[3:-3] = 0.0
             
            dpss_fil = CsTransform.pynufft.appendmat(dpss_fil,cols)
            dpss_cols  = CsTransform.pynufft.appendmat(dpss_cols,rows)
 
            dpss_fil=dpss_fil*numpy.transpose(dpss_cols,(1,0))
            print('dpss shape',dpss_fil.shape)
        if u0dims-1 > 2:
             
            zag = numpy.shape(u0)[2]
            dpss_zag = numpy.kaiser(zag, 100)            
            dpss_zag = numpy.fft.fftshift(dpss_zag)
            dpss_zag[3:-3] = 0.0
            dpss_fil = CsTransform.pynufft.appendmat(dpss_fil,zag)
                      
            dpss_zag = CsTransform.pynufft.appendmat(dpss_zag,rows)
             
            dpss_zag = CsTransform.pynufft.appendmat(dpss_zag,cols)
             
            dpss_fil=dpss_fil*numpy.transpose(dpss_zag,(1,2,0)) # low pass filter
            print('dpss shape',dpss_fil.shape)
        #dpss_fil=dpss_fil / 10.0
         
        rms=numpy.sqrt(numpy.mean(u0*u0.conj(),-1)) # root mean square over the coil dimension
        st['sensemap']=numpy.ones(numpy.shape(u0),dtype=numpy.complex64)
        print('sensemap shape',st['sensemap'].shape, L)
        print('u0shape',u0.shape,rms.shape)
 
        #    print('L',L)
        #    print('rms',numpy.shape(rms))
        for ll in xrange(0,L):
            st['sensemap'][...,ll]=(u0[...,ll]+1e-16)/(rms+1e-16)
             
            print('sensemap shape',st['sensemap'].shape, L)
            print('rmsshape', rms.shape) 
            st['sensemap'][...,ll] = fftpack.fftn(st['sensemap'][...,ll], 
                                              st['sensemap'][...,ll].shape,
                                                    range(0,numpy.ndim(st['sensemap'][...,ll]))) 
            st['sensemap'][...,ll] = st['sensemap'][...,ll] * dpss_fil
            st['sensemap'][...,ll] = fftpack.ifftn(st['sensemap'][...,ll], 
                                              st['sensemap'][...,ll].shape,
                                                    range(0,numpy.ndim(st['sensemap'][...,ll])))                             
#             st['sensemap'][...,ll]=fftpack.ifftn(fftpack.fftn(st['sensemap'][...,ll])*dpss_fil)
#         st['sensemap'] = Normalize(st['sensemap'])
        return st
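
The sensitivity maps above are formed by dividing each coil image by the root-mean-square combination over coils and then smoothing each map with a low-pass filter built from a shifted, truncated Kaiser window. Below is a rough NumPy-only sketch of the same idea for the 2-D case; the helper name and default parameters are illustrative and not part of pynufft or CsTransform:

import numpy as np

def estimate_sensemap(u0, beta=100, keep=3):
    """Coil sensitivity maps from multi-coil images u0 of shape (rows, cols, L)."""
    rows, cols, L = u0.shape
    rms = np.sqrt(np.mean(u0 * u0.conj(), axis=-1)).real        # RMS coil combination
    sense = ((u0 + 1e-16) / (rms[..., None] + 1e-16)).astype(np.complex64)
    # Low-pass mask: all-pass along rows, shifted/truncated Kaiser window along columns,
    # mirroring dpss_rows/dpss_cols in _make_sense above
    win = np.fft.fftshift(np.kaiser(cols, beta))
    win[keep:-keep] = 0.0
    mask = np.ones(rows)[:, None] * win[None, :]
    for ll in range(L):
        spec = np.fft.fftn(sense[..., ll])
        sense[..., ll] = np.fft.ifftn(spec * mask)               # smooth each coil map
    return sense

The original additionally applies a third Kaiser window along the slice axis for 3-D data and uses scipy.fftpack for the transforms; the sketch keeps only the 2-D path for brevity.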