def agent(observation, configuration):
    value, policy = model.get_preds(observation)
    times_visited = model.get_N(observation)
    # Return which column to drop a checker into (the action).
    # tau is the exploration temperature, assumed to be defined at module scope.
    if tau == 0:
        # Greedy: pick the most-visited column.
        action = np.argmax(times_visited)
    else:
        # Sample a column in proportion to visit count raised to the power 1 / tau.
        weighted_times_visited = [x ** (1 / tau) for x in times_visited]
        weighted_sum = sum(weighted_times_visited)
        action_probs = [x / weighted_sum for x in weighted_times_visited]
        action = np.random.choice(len(weighted_times_visited), 1, p=action_probs)[0]
    return action
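# A quick illustration of the temperature weighting above (the visit counts are made up):
# with tau = 1 the sampling probabilities are simply the normalized visit counts,
# while tau -> 0 collapses toward the argmax branch.
import numpy as np

times_visited = [10, 40, 50]                      # illustrative visit counts
tau = 1.0
weights = [x ** (1 / tau) for x in times_visited]
probs = [x / sum(weights) for x in weights]       # [0.1, 0.4, 0.5]
sampled = np.random.choice(len(weights), 1, p=probs)[0]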
Example #2
def calculate_new_state(grid_map, old_state, action, crash_type, start_locs):
    vel_row = action[0] + old_state[1][0]
    # Make sure velocity stays in between -5 and 5 inclusive
    if vel_row > 5:
        vel_row = 5
    if vel_row < -5:
        vel_row = -5
    vel_col = action[1] + old_state[1][1]
    # Make sure velocity stays in between -5 and 5 inclusive
    if vel_col > 5:
        vel_col = 5
    if vel_col < -5:
        vel_col = -5
    pos_row = vel_row + old_state[0][0]
    pos_col = vel_col + old_state[0][1]
    # Check to make sure it didn't cross any walls or we finished the race
    # Check whether the car crossed a wall or finished the race
    crashed, finished, finished_coords = \
        determine_if_crashed_or_finished(grid_map, old_state[0], (vel_row, vel_col))
    if finished:
        pos_row = finished_coords[0]
        pos_col = finished_coords[1]
    elif crashed:
        vel_row, vel_col = 0, 0
        if crash_type == 'soft':
            # Soft crash: stay at the previous position with zero velocity
            pos_row = old_state[0][0]
            pos_col = old_state[0][1]
        else:
            # Hard crash: restart from a random start location
            # (start_locs is a sequence of (row, col) pairs)
            new_pos_loc = start_locs[np.random.randint(len(start_locs))]
            pos_row = new_pos_loc[0]
            pos_col = new_pos_loc[1]

    return pos_row, pos_col, vel_row, vel_col
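# A small usage sketch of calculate_new_state. The state is a ((row, col) position,
# (row, col) velocity) pair and the action is a (row, col) acceleration; grid_map,
# start_locs and determine_if_crashed_or_finished are assumed to exist elsewhere.
old_state = ((3, 0), (1, 0))      # at row 3, column 0, moving one row per step
action = (0, 1)                   # accelerate by one in the column direction
pos_row, pos_col, vel_row, vel_col = calculate_new_state(
    grid_map, old_state, action, crash_type='soft', start_locs=start_locs)
new_state = ((pos_row, pos_col), (vel_row, vel_col))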
Example #3
def score_dataset(model, dataset):
    """
    Given a neural net classification model and a dataset of batches, evaluate
    the model on all the batches and return the predictions along with the truth
    values for every batch.

    :param model:   [nn.Module] -- A classification network (single arg)
    :param dataset: [Batch Iterator] -- The training set of batches
    :returns:       [List[Tuple]] -- A flat list of tuples, one tuple for each
                                     training instance, where the tuple is of
                                     the form (prediction, truth)
    """
    scores = list()
    model.eval()  # Set the model to evaluation mode
    classes = [0, 1]
    with torch.no_grad():
        for i, batch in enumerate(dataset):
            # Run the model on the input batch
            output = model((batch.code, batch.comm))

            # Get predictions for every instance in the batch
            signum_outs = torch.sign(output).cpu().numpy()
            preds = [
                0 if arr_el[0] < 0 else
                1 if arr_el[0] > 0 else np.random.choice(classes)  # tie-break at 0
                for arr_el in signum_outs
            ]

            # Prepare truth data
            truth = batch.label.cpu().numpy()

            # Add new tuples to output
            scores.extend([(p, int(t)) for p, t in zip(preds, truth)])
    return scores
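# Minimal usage sketch: reduce the (prediction, truth) pairs returned by score_dataset
# to an accuracy figure. `model` and `test_batches` are assumed to already exist and to
# match the batch interface used above (batch.code, batch.comm, batch.label).
scores = score_dataset(model, test_batches)
correct = sum(1 for pred, truth in scores if pred == truth)
print("accuracy: {:.3f}".format(correct / len(scores)))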
Example #4
def expantion(n):
    # MCTS expansion: pick a random unexpanded child slot and create its node
    idx = choice([i for i, ni in enumerate(n.children) if ni is None])
    newst = move(idx, n.player, n.state)
    newNode = node(newst, nextPlayer(n.player),
                   [element(n.state, i) for i in range(9)])
    n.children[idx] = newNode
    return newNode, idx
def mc_control(env,
               num_episodes,
               alpha,
               gamma=1.0,
               eps=1,
               final_eps=0.1,
               stop_eps_after=0.5,
               every_visit=False):
    nA = env.action_space.n
    # initialize empty dictionary of arrays
    Q = defaultdict(lambda: np.zeros(nA))
    policy = defaultdict(lambda: np.random.choice(np.arange(nA)))

    # eps will decrease linearly and reach final_eps in episode stop_eps_at_episode
    final_eps = min(eps, final_eps)
    stop_eps_at_episode = num_episodes * stop_eps_after - 1
    eps_delta = (eps - final_eps) / stop_eps_at_episode

    # loop over episodes
    for i_episode in range(1, num_episodes + 1):
        # monitor progress
        if i_episode % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
            sys.stdout.flush()

        # generate episode with current policy and eps
        episode = generate_episode_eps_policy(env, Q, eps)
        eps -= eps_delta

        # for each state-action pair, get return and update q-table and policy
        Q, policy = improve_q_from_episode(Q, policy, episode, alpha, gamma,
                                           every_visit)

    return policy, Q
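# Hedged usage sketch for mc_control, assuming an OpenAI Gym style environment and that
# the helpers referenced above (generate_episode_eps_policy, improve_q_from_episode) are
# defined elsewhere; the environment name and hyperparameters are illustrative only.
import gym

env = gym.make('Blackjack-v1')
policy, Q = mc_control(env, num_episodes=50000, alpha=0.02, gamma=1.0,
                       eps=1.0, final_eps=0.1, stop_eps_after=0.5)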
Example #6
def preprocess(data, maxlen_doc=15, maxlen_sentence=512):
    # (document/sentence/char)
    X = np.zeros((data['review'].shape[0], maxlen_doc, maxlen_sentence))
    y = data['sentiment']
    data_cleaned = data['review'].map(lambda review: clean(review))
    tokenizer = create_tokenizer(data_cleaned)

    # corpus_size = len(tokenizer.word_counts)

    for i, (review,
            label) in enumerate(zip(data['review'][:5],
                                    data['sentiment'][:5])):
        sentences = re.split(r'(?<!\w\.\w.)(?<![a-z]\.)(?<=\.|\?)\s', review)

        tokenized = tokenizer.texts_to_sequences(sentences)
        tokenized = pad_sequences(tokenized, maxlen_sentence)

        if tokenized.shape[0] > maxlen_doc:
            # Keep a random subset of maxlen_doc sentences
            keep = np.random.choice(tokenized.shape[0], maxlen_doc, replace=False)
            tokenized = tokenized[keep]

        X[i][np.arange(min(tokenized.shape[0], maxlen_doc))] = tokenized

    # pp.pprint(X[0])

    return X, y, tokenizer
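# Usage sketch, assuming `data` is a pandas DataFrame with 'review' and 'sentiment'
# columns as the indexing above implies, and that clean/create_tokenizer are defined.
X, y, tokenizer = preprocess(data, maxlen_doc=15, maxlen_sentence=512)
print(X.shape)   # (num_reviews, 15, 512): document x sentence x token ids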
def generateWS(vocab, probdist, avg_length, sd):
    # Generate one word-salad sentence using the unigram distribution
    # vocab is a list of vocabulary words
    # probdist contains the probabilities of the vocabulary words, in the same order
    # avg_length is the average length of sentences
    # sd is the standard deviation of the sentence lengths

    # Draw the length
    length = math.floor(random.gauss(avg_length, sd))
    if length < 6:
        length = 6
    # Draw the words
    draw = np.random.choice(vocab, length, p=probdist).tolist()
    # Assemble the sentence
    # Capitalize the first word in the sentence
    sentence = [capwords(draw.pop(0))]
    while draw:
        next_word = draw.pop(0)
        # Special case for punctuation that needs to be closed
        if next_word in ["(", "«"]:
            try:
                sentence.append(next_word)
                sentence.append(draw.pop(0))
                closing = ""
                if next_word == "(":
                    closing = ")"
                elif next_word == "«":
                    closing = "»"
                draw.insert(random.randint(0, len(draw)), closing)
            except IndexError:
                break
        elif next_word not in [")", "»"]:
            sentence.append(next_word)
    sentence.append(".")
    return sentence
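# Usage sketch for generateWS; the vocabulary and unigram probabilities below are
# illustrative only (probdist must sum to 1 and match the order of vocab).
vocab = ["the", "cat", "sat", "on", "mat", ",", "("]
probdist = [0.3, 0.2, 0.15, 0.15, 0.1, 0.05, 0.05]
print(" ".join(generateWS(vocab, probdist, avg_length=8, sd=2)))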
Example #8
    def apply(self, races):
        """
        races: a dataframe with columns 'favourite' and 'odds'
        """
        if self.count == 0 or self.count == self.max_count:  # self.max_count is an assumed attribute name
            self.__reset()
            self.favourites = races[races.favourite == 1]
        # elif not self.did_win:
        #     self.s

        self.spends.append(len(self.spends) + 1)
        # filter to favourites
        self.favourites = races[races.favourite == 1]
        # bet one unit on each favourite
        self.amount_spent = len(self.spends)
        # filter again to favourites that won
        self.winners = self.favourites[self.favourites.pos == 1]
        # add the money returned
        self.amount_returned = self.winners.odds.sum()
        self.cumulative_return += self.amount_returned
        self.winnings.append(self.cumulative_return)
        # profit
        self.profit = self.amount_returned - self.amount_spent
        # % gain or loss on total spent so far
        self.percentage_take = 100 * (
            self.amount_returned / self.amount_spent - 1)

        self.percentage_takes.append(self.percentage_take)

        return self.percentage_takes
Example #9
    def _sampleFromHalfFull(self, batch_size):
        try:
            p = self.priority[:self.pointer] / \
                np.sum(self.priority[:self.pointer])
            self.index = np.random.choice(self.pointer, batch_size, p=p)
        except ValueError:
            print('vals', self.sum, np.sum(self.priority))
            raise
        return [m[self.index] for m in self.memory]
Example #10
    def _sampleFromFull(self, batch_size):
        try:
            self.index = np.random.choice(self.length,
                                          batch_size,
                                          p=self.priority / np.sum(self.priority))
        except ValueError:
            print('vals', self.sum, np.sum(self.priority))
            raise
        return [m[self.index] for m in self.memory]
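# Both samplers above draw indices with probability proportional to priority; a minimal
# standalone illustration of that pattern (the priorities are made up):
import numpy as np

priority = np.array([1.0, 4.0, 5.0])
p = priority / np.sum(priority)                  # [0.1, 0.4, 0.5]
index = np.random.choice(len(priority), size=2, p=p)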
Example #11
def check_kkt_convergence(unbound_index):
    '''Check whether the KKT conditions are satisfied. Return (True, None) if there is no
    violation, otherwise (False, index) for a randomly chosen violating index.'''

    if len(unbound_index) == 0:
        return True, None
    else:
        return False, np.random.choice(unbound_index)
def Q02(Xnp, k):

    Wkp = []

    for j in range(k):
        # Sample k rows of Xnp (with replacement) on each iteration
        Wkp.append(Xnp[np.random.choice(len(Xnp), k, replace=True)])

    Wkp = np.vstack(Wkp)

    return Wkp
Example #13
    def _stochastic_gradient_descent(self, data, label, lr, epochs, sample_rate):
        n_row, n_features = data.shape
        self.bias = np.random.normal(size=1)
        self.weights = np.random.normal(size=n_features)

        n_sample = int(sample_rate * n_row)
        for _ in range(epochs):
            for i in np.random.choice(n_row, n_sample, replace=False):
                grad_bias, grad_weights = self._get_gradient(data[i], label[i])
                self.bias -= lr * grad_bias
                self.weights -= lr * grad_weights
Example #14
def simulation(player, b):
    # Random playout until the board is won or drawn
    while not (winner(b) or draw(b)):
        bflat = [bj for bi in b for bj in bi]
        idx = choice([i for i, bj in enumerate(bflat) if bj == ' '])
        b = move(idx, player, b)
        player = nextPlayer(player)
    if draw(b):
        return 0
    if player == 'x':
        return -1
    return 1
        def _initializer(shape, dtype=dtype, partition_info=None):
            if shape is None or len(shape) != 2:
                raise ValueError('Only supports 2d shaped parameters.')
            fan_in = shape[-2]
            fan_out = shape[-1]
            if fan_in > fan_out:
                raise ValueError(
                    'Fan in should be less than or equal to fan out.')
            nparr = np.zeros(shape, dtype=dtype.as_numpy_dtype())
            if one2one:
                rows = np.random.choice(np.arange(fan_in), size=fan_in, replace=False)
                cols = np.random.choice(np.arange(fan_out),
                                        size=fan_in,
                                        replace=False)
            else:
                rows = np.random.randint(fan_in, size=fan_out)
                cols = np.arange(fan_out)

            nparr[rows, cols] = (2 * np.random.randint(2, size=len(rows)) -
                                 1) * scale_factor
            return tf.constant(nparr, dtype=dtype)
Example #16
def get_bootstrap(
    l: list,
    n,
    random_state=RANDOM_STATE,
    replace=True,
):
    if replace:
        random.seed(random_state)
        bootstrap = [get_random_item(l)
                     for _ in range(0, n)]  # faster than np.random.choice
        return bootstrap
    else:
        np.random.seed(random_state)
        bootstrap = np.random.choice(l, size=n, replace=False)
        return bootstrap
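# Quick usage sketch for get_bootstrap (RANDOM_STATE and get_random_item are assumed to
# be defined in the surrounding module, as in the snippet above).
data = [3, 1, 4, 1, 5, 9, 2, 6]
sample_with = get_bootstrap(data, n=5)                     # with replacement
sample_without = get_bootstrap(data, n=5, replace=False)   # without replacement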
    def _stochastic_gradient_descent(self, data, label, lr, epochs,
                                     sample_rate):
        if data.ndim == 1:
            n_row, n_features = 1, data.shape[0]
        elif data.ndim == 2:
            n_row, n_features = data.shape
        else:
            raise ValueError("Invalid Dimension.")
        self.bias = np.random.normal(size=1)
        self.weights = np.random.normal(size=n_features)

        n_sample = int(sample_rate * n_row)
        for _ in range(epochs):
            for i in np.random.choice(n_row, n_sample, replace=False):
                grad_bias, grad_weights = self._get_gradient(data[i], label[i])
                self.bias -= lr * grad_bias
                self.weights -= lr * grad_weights
Example #18
def index(request):
    # load the template
    template = loader.get_template('tag/index.html')

    # load tree variables
    [children_left, children_right, feature] = read_files()
    n_nodes = children_left.size
    # convert to JSON format
    json_children_left = json.dumps(children_left.tolist())
    json_children_right = json.dumps(children_right.tolist())
    json_feature = json.dumps(feature.tolist())

    # reading the user inputs
    if (request.POST.get('ans')):
        leaf_node = request.POST.get('ans')
        leaf_node = int(leaf_node)
        # make a prediction
        predicted_movie = make_prediction(leaf_node)  #returns an ndarray
        # randomly choose a movie if more than one returned
        if predicted_movie.size > 1:
            movie_list = [np.random.choice(predicted_movie)]
        else:
            movie_list = predicted_movie.tolist()

        movie_id = str(MovieRec.objects.get(name=movie_list[0]).movie_id)
        json_predicted_movie = json.dumps(movie_list)

    else:
        predicted_movie = ["none"]
        movie_id = None
        json_predicted_movie = json.dumps(predicted_movie)

    # create context to pass to the template
    context = {
        'json_children_left': json_children_left,
        'json_children_right': json_children_right,
        'json_feature': json_feature,
        'json_predicted_movie': json_predicted_movie,
        'predicted_movie_py': predicted_movie,
        'movie_id_py': movie_id,
        'n_nodes': n_nodes,
    }
    return HttpResponse(template.render(context, request))
Example #19
    def rreduce(self, typ, amnt, inplace=False):
        """
        Uniformly downsample stored lags and field value differences for
        faster calculations and smaller memory size. Object points and field
        values are not affected. Resulting Variogram objects are also marked
        with a self.reduced = True flag to indicate that the self.lags and
        self.diffs are not reflective of all combinations of self.x and self.f.

        Parameters
        ----------
        typ : str
            Descriptor of format of reduction amount passed to amnt. Can be
            one of:
                * "abs" : Size of remaining lag and field data difference
                array specified as an absolute size.
                * "frac" : Size of remaining lag and field data difference
                array specified as a fraction of original size.
        amnt : int, float
            Amount to reduce lag and field difference data vectors. Specific
            formats given below for each typ option.
                * "abs" : int giving the size of the remaining lag and
                    field data difference vectors.
                * "frac" : float between 0 and 1 describing how much of lag
                    and field difference should remain as fraction of original
                    size
        inplace : bool
            Whether or not object is manipulated inplace. If False, will return
            variogram object with same x and f values but with the reduced lag
            domain.

        Returns
        -------
        new (optional) : Variogram
            New Variogram instance with same x and f values but reduced lag
            domain.
        """
        if typ == "frac":
            size = int(amnt * self.lags.size)
        if typ == "abs":
            size = amnt

        ids = np.random.choice(self.lags.size, size)
        self.rm_ids(ids, inplace)
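# Brief usage sketch following the docstring above; `vgm` is an assumed existing
# Variogram instance (the non-inplace return path depends on rm_ids, defined elsewhere).
vgm.rreduce("frac", 0.1, inplace=True)    # keep roughly 10% of the lag/difference pairs
vgm.rreduce("abs", 5000, inplace=True)    # or cap the stored pairs at an absolute size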
def fitter_df_maker(lig_id: int) -> Tuple[pd.DataFrame, pd.DataFrame]:

    def labeled_(psqs):
        labeled_seqs = sequences.loc[sequences.index.isin(psqs)] # hidden_0

        labeled_seqs_known = labeled_seqs.sample(frac=0.75)

        labeled_seqs_hidden = labeled_seqs.loc[~labeled_seqs.index.isin(labeled_seqs_known.index)]
        return labeled_seqs_known, labeled_seqs_hidden

    positive_seq_ids = binding.loc[binding.lig_idx==lig_id, 'seq_idx'].values

    if len(positive_seq_ids) > s_: # we want unlabeled to be dominant
        positive_seq_ids = np.random.choice(positive_seq_ids, s_)
    if len(positive_seq_ids) > 5:
        unlabeled_seqs = (sequences.loc[~sequences.index.isin(positive_seq_ids)]
                          .sample(n = sample_size-len(positive_seq_ids))) #

        labeled_seqs_known, labeled_seqs_hidden = labeled_(positive_seq_ids)

        unlabeled_seqs['bind'] = np.zeros(unlabeled_seqs.shape[0]) # equiv to df_seq_sub_neg.loc[:,"bind"] = 0

        labeled_seqs_known['bind'] = np.ones(labeled_seqs_known.shape[0])

        labeled_seqs_hidden['bind'] = np.zeros(labeled_seqs_hidden.shape[0])

        df_fitter = pd.concat([unlabeled_seqs, labeled_seqs_known, labeled_seqs_hidden])

        X = pd.DataFrame(tfidf.transform(df_fitter.sequence.values).toarray(),
                         columns=tfidf.get_feature_names(),
                         index=df_fitter.index)

        y = df_fitter.bind
        #print(lig_id, X.shape, y.shape)
        return X,y
    else:
        raise ValueError(f"not enough positive sequences for lig_id {lig_id}")
def train(sess, network_architecture, inputs, input_configs, cur_path, clamped_train=False,\
          learning_rate=0.00001,
          batch_size=50, training_epochs=10, display_step=1,
          controlled_z=2, rc_loss=1, kl_loss=1, loaded=False):

    train_writer = tf.summary.FileWriter(os.path.join(cur_path, 'tb/train/'))
    vae = VariationalAutoencoder(sess, network_architecture, 
                                 learning_rate=learning_rate, 
                                 batch_size=batch_size,
                                 controlled_z=controlled_z,
                                 rc_loss=rc_loss,
                                 kl_loss=kl_loss)
    total_iters = training_epochs * inputs.shape[0] / batch_size
    # Training cycle
    for iters in range(total_iters):
        if not clamped_train: 
            batch_xs = inputs[np.random.choice(range(inputs.shape[0]), batch_size, replace=False),:]

            # Fit training using batch data
            cost, smr, kl, rc = vae.partial_fit(batch_xs, -1)
        else:
            # Pick an input type with probability proportional to its configured ratio
            type_to_train = random.choice([t for t in input_configs.keys()
                                           for _ in range(input_configs[t]["ratio"])])
            same_config_inputs = random.choice(input_configs[type_to_train]["index"])
            inputs_index_to_use = np.random.choice(same_config_inputs, batch_size, replace=False)
            batch_xs = inputs[inputs_index_to_use, :]
            # Fit training using batch data
            cost, smr, kl, rc = vae.partial_fit(batch_xs, input_configs[type_to_train]["z"])
  
        train_writer.add_summary(smr, iters)
        # Display logs per epoch step
        if iters % display_step == 0:
            print "iters:", '%04d' % (iters+1), \
                  "total_loss=", "{:.9f}".format(cost),\
                  "kl_loss=", "{:.9f}".format(kl),\
                  "rc_loss=", "{:.9f}".format(rc)
    return vae
Example #22
uids = np.load('uids.npy')
iids = np.load('iids.npy')
rs = np.load('rs.npy')

sorted_df = df.sort('uid')
uid = data_frame_to_array(sorted_df.select('uid'))
iid = data_frame_to_array(sorted_df.select('iid'))
r = data_frame_to_array(sorted_df.select('r'))

c = data_frame_to_array(df.groupBy('uid').count().sort('uid').select('count'))
segment = np.cumsum(c) - c
quotient, remainder = np.divmod(c, n_partitions)
offsets = [np.zeros_like(c)]
for i in range(n_partitions):
    offsets.append(offsets[-1] + quotient + (i < remainder))
indices = [
    utils.arange(segment + p, segment + q)
    for p, q in zip(offsets[:-1], offsets[1:])
]

uids = list(map(uid.__getitem__, indices))
iids = list(map(iid.__getitem__, indices))
rs = list(map(r.__getitem__, indices))

np.save('partitions/uids', uids)
np.save('partitions/iids', iids)
np.save('partitions/rs', rs)

Example #23
def get_rand_I_rows(n, r, p=None):

    rows = np.random.choice(n, size=r, replace=False, p=p)

    return np.identity(n)[rows, :]
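# Short usage sketch: multiplying the returned selection matrix by a data matrix picks
# r random rows of it (the 6x3 matrix below is illustrative only).
A = np.arange(18).reshape(6, 3)       # illustrative data matrix
S = get_rand_I_rows(6, 2)             # 2 random rows of the 6x6 identity
subset = S @ A                        # the corresponding 2 random rows of A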
Example #24
    def edge_add(self):
        # Add one new neighbour drawn from the nodes not already connected
        self.neigh = np.concatenate(
            (np.random.choice(np.setdiff1d(settings.N_arr, self.neigh), 1), self.neigh),
            axis=None)
    def train(self, data):
        '''
        Reference: Piotr Dollár's Computer Vision MATLAB toolbox.

        INPUTS
            data       - Type: DataBin. Data for training the tree.

        OUTPUTS
            tree       - (dict) learned decision tree model with the following keys:
                fids       - [Kx1] feature id tested at each node
                thrs       - [Kx1] threshold corresponding to each fid
                child      - [Kx1] index of the left child of each node (0 for a leaf)
                hs         - [Kx1] log ratio (0.5 * log(p / (1 - p))) at each node
                weights    - [Kx1] total sample weight at each node
                depth      - [Kx1] depth of each node
            data       - data used for training the tree (quantized version of the input)
            err        - decision tree training error
        '''
        if not isinstance(data, DataBin):
            raise TypeError('DataBin object type is required.')

        if not data.quant:
            data.quantize()

        #Initialize arrays
        (NP, FP) = data.posSamp.shape
        (NN, FN) = data.negSamp.shape
        assert FP == FN
        F = FP

        tree = dict()
        maxNodes = 2**(self.pTree.maxDepth +
                       1) - 1  #Maximum number of nodes in BinaryTree
        tree['fids'] = np.zeros(maxNodes, dtype='uint32')
        tree['thrs'] = np.zeros(maxNodes, dtype='float64')
        tree['child'] = np.zeros(maxNodes, dtype='uint32')
        tree['hs'] = np.zeros(maxNodes, dtype='float64')
        tree['weights'] = np.zeros(maxNodes, dtype='float64')
        tree['depth'] = np.zeros(maxNodes, dtype='uint32')
        errs = np.zeros(maxNodes, dtype='float64')

        #Train Decision tree
        curNode = 0  # id of the node currently being processed
        lastNode = 1  # number of nodes allocated so far
        # Per-node sample weights (a sample that does not reach a node has weight 0 there)
        nodePosWtList = [None] * maxNodes
        nodeNegWtList = [None] * maxNodes
        nodePosWtList[0] = data.posWt
        nodeNegWtList[0] = data.negWt

        while curNode < lastNode:
            nodePosWt = nodePosWtList[curNode]
            nodeNegWt = nodeNegWtList[curNode]
            nodePosWtList[curNode] = None
            nodeNegWtList[curNode] = None
            nodePosWtSum = np.sum(nodePosWt)
            nodeNegWtSum = np.sum(nodeNegWt)
            nodeWtSum = nodePosWtSum + nodeNegWtSum

            tree['weights'][curNode] = nodeWtSum
            prior = nodePosWtSum / nodeWtSum
            errs[curNode] = min(prior, 1 - prior)
            constant = np.e**8 / (1 + np.e**8)
            alpha =  4.0 if (prior > constant) else \
              -4.0 if (prior < 1 - constant) else \
              0.5 * log(prior / (1 - prior))
            tree['hs'][curNode] = alpha
            #alpha = 0.5 * log(prior / (1 - prior))
            #tree['hs'][curNode] = max(-4.0, min(4.0, alpha))

            # Stop splitting: the node is nearly pure, its depth is at the limit,
            # or its total sample weight is too small
            if (prior < 1e-3 or prior > 1 - 1e-3) or (
                    tree['depth'][curNode] >=
                    self.pTree.maxDepth) or (nodeWtSum < self.pTree.minWeight):
                curNode += 1
                continue

            # Find the best decision stump,
            # subsampling the candidate features if fracFtrs < 1
            if self.pTree.fracFtrs < 1:
                stumpFtrsId = np.random.choice(np.arange(F),
                                               floor(self.pTree.fracFtrs *
                                                     F)).astype('uint32')
            else:
                stumpFtrsId = np.arange(F, dtype='uint32')

            (stumpErrs, stumpThrs) = self.bestStump(data,
                                                    nodePosWt / nodeWtSum,
                                                    nodeNegWt / nodeWtSum,
                                                    stumpFtrsId, prior)

            bestFtrsId = np.argmin(stumpErrs)
            bestThrs = stumpThrs[bestFtrsId] + 0.5
            bestFtrsId = stumpFtrsId[bestFtrsId]

            # Split node: boolean masks for the samples routed to the left child
            leftChlidPosWt = data.quantPosSamp[:, bestFtrsId] < bestThrs
            leftChlidNegWt = data.quantNegSamp[:, bestFtrsId] < bestThrs
            if (np.any(leftChlidPosWt) or np.any(leftChlidNegWt)) and (
                    np.any(~leftChlidPosWt)
                    or np.any(~leftChlidNegWt)):  # valid split: both children receive samples
                #Inverse quantization
                bestThrs = data.xMin[bestFtrsId] + bestThrs * (
                    data.xMax[bestFtrsId] -
                    data.xMin[bestFtrsId]) / (self.pTree.nBins - 1)
                nodePosWtList[lastNode] = leftChlidPosWt * nodePosWt
                nodeNegWtList[lastNode] = leftChlidNegWt * nodeNegWt
                nodePosWtList[lastNode + 1] = (~leftChlidPosWt) * nodePosWt
                nodeNegWtList[lastNode + 1] = (~leftChlidNegWt) * nodeNegWt

                tree['thrs'][curNode] = bestThrs
                tree['fids'][curNode] = bestFtrsId
                tree['child'][curNode] = lastNode
                tree['depth'][lastNode:lastNode +
                              2] = tree['depth'][curNode] + 1

                lastNode += 2

            curNode += 1

        # Trim the tree arrays down to the nodes actually used:
        tree['fids'] = tree['fids'][0:lastNode].copy()
        tree['thrs'] = tree['thrs'][0:lastNode].copy()
        tree['child'] = tree['child'][0:lastNode].copy()
        tree['hs'] = tree['hs'][0:lastNode].copy()
        tree['weights'] = tree['weights'][0:lastNode].copy()
        tree['depth'] = tree['depth'][0:lastNode].copy()
        err = np.sum(errs[0:lastNode] * tree['weights'] *
                     (tree['child'] == 0))  #Sum up the leaf nodes' error

        #return
        self.tree = tree
        self.err = err
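# A hedged sketch (not part of the original class) of evaluating the learned structure on
# a single sample: apply_tree is a hypothetical helper, relying on the convention used in
# train() that child[k] == 0 marks a leaf, with the left child stored at child[k] and the
# right child at child[k] + 1, and thresholds already inverse-quantized.
def apply_tree(tree, x):
    k = 0
    while tree['child'][k] != 0:                  # child == 0 marks a leaf
        if x[tree['fids'][k]] < tree['thrs'][k]:
            k = tree['child'][k]                  # go to the left child
        else:
            k = tree['child'][k] + 1              # the right child sits next to it
    return tree['hs'][k]                          # 0.5 * log-ratio confidence at the leaf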
Example #26
mylist = datastore.obs_list((obsid[0]))
mylist = datastore.obs_list(*(obsid[0]))
mylist = datastore.obs_list((obsid[0],))
mylist
mylist[0]
mylist[1]
sim
print sim.result
get_ipython().magic(u'pinfo np.random.choice')
nbkg=356
alpha=0.1
nOFF=356
nbkg=alpha*nOFF
nbkg = np.random.poisson(nbkg)
nbkg
np.random.choice(np.arange(nOFF),nbkg,replace=False)
idxON=np.random.choice(np.arange(nOFF),nbkg,replace=False)
get_ipython().magic(u'pinfo fakerun.bkg')
get_ipython().magic(u'pinfo fakerun')
sim.alpha
sim.off_vector
sim = SpectrumSimulation(aeff=aeff, edisp=edisp, source_model=model, livetime=livetime)
sim.simulate_obs(seed=42, obs_id=0)
sim.obs.peek()
sim.off_vector
sim.on_vector
sim.on_vector.energy
sim.on_vector.energy.bins
sim.on_vector.energy.hi