Example 1
    def acc(self):
        """Compute the per-class accuracies and the overall accuracy.

        The values are derived from the confusion matrix accumulated so far
        in ``self.confusion_matrix``; the method takes no arguments.

        Returns:
            A list of floats of length num_classes + 1: the per-class
            accuracies, followed by their mean (NaN entries ignored) as the
            overall accuracy.
        """
        if self.confusion_matrix is None:
            return None

        accs = []
        for label in range(self.num_classes):
            tp = np.longlong(self.confusion_matrix[label, label])
            fn = np.longlong(self.confusion_matrix[label, :].sum()) - tp

            if tp + fn == 0:
                acc = float('nan')
            else:
                acc = tp / (tp + fn)

            accs.append(acc)

        accs.append(np.nanmean(accs))

        return accs
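For reference, a minimal standalone sketch of the same computation on a hypothetical 3-class confusion matrix (rows index ground truth, columns index predictions); the empty third class illustrates the NaN handling:

import numpy as np

conf = np.array([[5, 1, 0],
                 [2, 3, 0],
                 [0, 0, 0]])  # class 2 never occurs in the ground truth

accs = []
for label in range(3):
    tp = np.longlong(conf[label, label])
    fn = np.longlong(conf[label, :].sum()) - tp
    accs.append(float('nan') if tp + fn == 0 else tp / (tp + fn))
accs.append(np.nanmean(accs))  # [0.833..., 0.6, nan, 0.716...]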
Example 2
    def getIouScoreForLabel(self, label):
        # Calculate and return IOU score for a particular label (train_id)
        if label == self.voidClass:
            return float('nan')

        # the number of true positive pixels for this label
        # the entry on the diagonal of the confusion matrix
        tp = np.longlong(self.confMatrix[label, label])

        # the number of false negative pixels for this label
        # the row sum of the matching row in the confusion matrix
        # minus the diagonal entry
        fn = np.longlong(self.confMatrix[label, :].sum()) - tp

        # the number of false positive pixels for this label:
        # the column sum of the corresponding column of the confusion matrix,
        # counting only rows whose ground-truth label is neither ignored
        # (void) nor the label of interest
        notIgnored = [
            l for l in self.validClasses
            if not l == self.voidClass and not l == label
        ]
        fp = np.longlong(self.confMatrix[notIgnored, label].sum())

        # the denominator of the IOU score
        denom = (tp + fp + fn)
        if denom == 0:
            return float('nan')

        # return IOU
        return float(tp) / denom
Example 3
def getIouScoreForLabel(label, confMatrix, args):
    if id2label[label].ignoreInEval:
        return float('nan')

    # the number of true positive pixels for this label
    # the entry on the diagonal of the confusion matrix
    tp = np.longlong(confMatrix[label, label])

    # the number of false negative pixels for this label
    # the row sum of the matching row in the confusion matrix
    # minus the diagonal entry
    fn = np.longlong(confMatrix[label, :].sum()) - tp

    # the number of false positive pixels for this label:
    # the column sum of the corresponding column of the confusion matrix,
    # counting only rows whose ground-truth label is neither ignored nor the
    # label of interest
    notIgnored = [
        l for l in args.evalLabels
        if not id2label[l].ignoreInEval and not l == label
    ]
    fp = np.longlong(confMatrix[notIgnored, label].sum())

    # the denominator of the IOU score
    denom = (tp + fp + fn)
    if denom == 0:
        return float('nan')

    # return IOU
    return float(tp) / denom
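A small worked check of the formula above on a hypothetical 3-class matrix, with class 2 standing in for the void class:

import numpy as np

conf = np.array([[5, 1, 1],
                 [2, 3, 0],
                 [4, 0, 2]])  # rows: ground truth, columns: predictions

label, void = 0, 2
tp = np.longlong(conf[label, label])              # 5
fn = np.longlong(conf[label, :].sum()) - tp       # 2
not_ignored = [l for l in range(3) if l not in (void, label)]
fp = np.longlong(conf[not_ignored, label].sum())  # 2: the void row is excluded
iou = float(tp) / (tp + fp + fn)                  # 5 / 9 ≈ 0.556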
Example 4
def decode(code, a):
    length = len(code)
    decode = list(range(length))
    decode[0] = np.longlong(code[0])
    for i in range(length-1):
        # u is assumed to be a module-level constant (DPCM leak factor);
        # each decoded sample is the previous one scaled by u plus the
        # de-quantized difference
        decode[i+1] = np.longlong(decode[i])*u + (code[i+1]-7.5)*a
    return decode
Example 5
    def iou(self):
        """Compute the per-class IoU and the mean IoU.

        The values are derived from the confusion matrix accumulated so far
        in ``self.confusion_matrix``; the method takes no arguments.

        Returns:
            A list of floats of length num_classes + 1: the per-class IoUs,
            followed by the mIoU (the mean over non-NaN classes).
        """
        if self.confusion_matrix is None:
            return None

        ious = []
        for label in range(self.num_classes):
            tp = np.longlong(self.confusion_matrix[label, label])
            fn = np.longlong(self.confusion_matrix[label, :].sum()) - tp
            fp = np.longlong(self.confusion_matrix[:, label].sum()) - tp

            if tp + fp + fn == 0:
                iou = float('nan')
            else:
                iou = (tp) / (tp + fp + fn)

            ious.append(iou)

        ious.append(np.nanmean(ious))

        return ious
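The per-class loop above can also be vectorized; a sketch assuming the usual num_classes x num_classes layout with ground truth on the rows:

import numpy as np

def iou_vectorized(conf):
    # diagonal = true positives; row/column sums give fn and fp per class
    tp = np.diag(conf).astype(np.longlong)
    fn = conf.sum(axis=1).astype(np.longlong) - tp
    fp = conf.sum(axis=0).astype(np.longlong) - tp
    denom = tp + fp + fn
    with np.errstate(divide='ignore', invalid='ignore'):
        ious = np.where(denom == 0, np.nan, tp / denom)
    return list(ious) + [np.nanmean(ious)]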
Example 6
def decode(code, a):
    length = len(code)
    decode = list(range(length))
    decode[0] = np.longlong(code[0])
    for i in range(length-1):
        # current decoded value = previous decoded value (scaled by the
        # module-level constant u) plus the compressed difference times the
        # quantization factor a
        decode[i+1] = np.longlong(decode[i])*u + (code[i+1]-7.5)*a
    return decode
Example 7
 def snr(self, data1, data2):
     length = len(data1)
     sum1 = np.longlong(0)
     sum2 = np.longlong(0)
     for i in range(length - 1):
         n1 = np.longlong(data1[i])**2 / length  # np.longlong guards against overflow
         sum1 = sum1 + n1
         n2 = np.longlong(data1[i] - data2[i])**2 / length
         sum2 = sum2 + n2
     return 10 * np.log10(sum1 / sum2)
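Note that the loop above stops at length - 1 and so skips the final sample. A vectorized sketch that sums every sample and accumulates in float64 instead of guarding each term with np.longlong:

import numpy as np

def snr_vectorized(data1, data2):
    signal = np.asarray(data1, dtype=np.float64)
    noise = signal - np.asarray(data2, dtype=np.float64)
    return 10 * np.log10(np.sum(signal ** 2) / np.sum(noise ** 2))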
Example 8
def getIouScoreForLabel(label, cm):
    tp = np.longlong(cm[label, label])
    fn = np.longlong(cm[label, :].sum()) - tp
    fp = np.longlong(cm[:, label].sum()) - tp
    # the denominator of the IOU score
    denom = (tp + fp + fn)
    if denom == 0:
        return float('nan')
    # return IOU
    return float(tp) / denom
Example 9
def get_iou(label_id, confusion, num_classes):
    tp = np.longlong(confusion[label_id, label_id])
    fn = np.longlong(confusion[label_id, 1:].sum()) - tp
    not_ignored = [l for l in range(1, num_classes) if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())
    denom = (tp + fp + fn)
    if denom == 0:
        return (1, 1, 1, 1)
    return (float(tp + 1e-6) / (denom + 1e-6),
            float(tp + 1e-6) / float(tp + fn + 1e-6), tp, denom)
Example 10
def get_acc(label_id, confusion):
    if not label_id in VALID_CLASS_IDS:
        return float('nan')
    # #true positives
    tp = np.longlong(confusion[label_id, label_id])
    # #false negatives
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    denom = (tp + fn)
    if denom == 0:
        return float('nan')
    return (float(tp) / denom, tp, denom)
Example 11
def get_iou(label_id, confusion):
    if not label_id in VALID_CLASS_IDS:
        return float('nan')
    tp = np.longlong(confusion[label_id, label_id])
    fn = np.longlong(confusion[label_id, 0:].sum()) - tp
    not_ignored = [l for l in VALID_CLASS_IDS if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())
    denom = (tp + fp + fn)
    if denom == 0:
        return (1, 1, 1, 1)
    return (float(tp) / denom, float(tp) / float(tp + fn + 1e-6), tp, denom)
Example 12
def get_iou(label_id, confusion):
    # NB: this variant labels the row sum as false positives, i.e. it treats
    # rows as predictions (the transpose of the other examples); the IoU is
    # unaffected, since the formula is symmetric in fp and fn
    # true positives
    tp = np.longlong(confusion[label_id, label_id])
    # false positives
    fp = np.longlong(confusion[label_id, :].sum()) - tp
    # false negatives
    fn = np.longlong(confusion[:, label_id].sum()) - tp

    denom = (tp + fp + fn)
    if denom == 0:
        return (0, 0, 0)
    return (float(tp) / denom, tp, denom)
Example 13
def get_iou(label_id, confusion):
    # true positives
    tp = np.longlong(confusion[label_id, label_id])
    # false negatives
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    # false positives
    not_ignored = [l for l in data.VALID_CLASS_IDS if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())

    denom = (tp + fp + fn)
    if denom == 0:
        return float('nan')
    return (float(tp) / denom, tp, denom)
Example 14
def snr(f):
    cn, wlen, sc = DPCM_encode(f)
    dc = DPCM_decode(f)
    s = 0
    n = 0
    for a in range(len(sc)):
        af = numpy.longlong(dc[a])
        bef = numpy.longlong(sc[a])
        p = af * af
        q = (af - bef) * (af - bef)
        s = s + p
        n = n + q
    return numpy.log10(s / n) * 10
Example 15
 def get_class_scores(self):
     scores = []
     for i in range(self.class_num):
         tp = np.longlong(self.conf_mat[i, i])
         gti = np.longlong(self.conf_mat[i, :].sum())
         resi = np.longlong(self.conf_mat[:, i].sum())
         denom = gti + resi - tp
         # NumPy division by a zero scalar warns and yields inf/nan instead
         # of raising ZeroDivisionError, so test the denominator explicitly
         if denom == 0:
             res = 0
         else:
             res = float(tp) / denom
         scores.append(res)
     return scores
Example 16
def get_iou(label_id, confusion):
    # true positives
    tp = np.longlong(confusion[label_id, label_id])
    # false negatives
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    # false positives
    not_ignored = [l for l in range(20) if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())

    denom = (tp + fp + fn)
    if denom == 0:
        return False
    return (float(tp) / denom, tp, denom)
Example 17
def energy(waveData):
    wlen = len(waveData)
    step = 256  # samples per frame
    frameNum = math.ceil(wlen / step)  # number of frames, rounded up
    ener = []
    for i in range(frameNum):
        curFrame = waveData[np.arange(i * step, min(i * step + step, wlen))]
        total = np.longlong(0)  # guard against overflow
        for j in range(len(curFrame)):
            n = np.longlong(curFrame[j])**2  # np.longlong guards against overflow
            total = total + n
        ener.append(total)
    return ener
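The inner loop can be avoided by widening the samples once; a sketch assuming a 1-D integer waveData array:

import numpy as np

def energy_vectorized(waveData, step=256):
    data = np.asarray(waveData, dtype=np.longlong)  # widen up front to avoid overflow
    # sum of squares over each frame of `step` samples (the last frame may be short)
    return [np.sum(data[i:i + step] ** 2) for i in range(0, len(data), step)]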
Example 18
def PhaseKai2opt(k_vector, noisy_original_image_fft, system_otf):
    w = np.shape(noisy_original_image_fft)[0]
    wo = w // 2

    noisy_original_image_fft = noisy_original_image_fft * (
        1 - 1 * system_otf**10)  #Increase the contrast by denoising
    noisy_original_image_fft = noisy_original_image_fft * np.conj(
        system_otf
    )  #Build term for minimisation (highlights places where i term is similar)
    otf_cutoff = otf_smooth(system_otf)
    DoubleMatSize = 0

    # Contingency for the situation where the size of the FFT is not large
    # enough to fit in extra frequency info from SIM reconstruction
    if 2 * otf_cutoff > wo:
        DoubleMatSize = 1

    if (DoubleMatSize > 0):
        t = 2 * w

        noisy_original_image_fft_temp = np.zeros((t, t))
        noisy_original_image_fft_temp[wo:w + wo,
                                      wo:w + wo] = noisy_original_image_fft
        noisy_original_image_fft = noisy_original_image_fft_temp
    else:
        t = w

    to = t // 2
    u = np.linspace(0, t - 1, t)
    v = np.linspace(0, t - 1, t)
    [U, V] = np.meshgrid(u, v)

    # Build term for comparison in cross-correlation (image with frequency added to it)
    noisy_image_freqadd = np.exp(
        -1j * 2 * np.pi * ((k_vector[1] / t * (U - to)) +
                           (k_vector[0] / t * (V - to)))) * fft.ifft2(noisy_original_image_fft)
    noisy_image_freqadd_fft = fft.fft2(noisy_image_freqadd)

    mA = np.longlong(
        np.sum(noisy_original_image_fft * np.conj(noisy_image_freqadd_fft))
    )  # Sum across pixels of product of image with complex conjugate with frequency introduced.
    mA = mA / np.longlong(
        (np.sum(noisy_image_freqadd_fft * np.conj(noisy_image_freqadd_fft))
         ))  # Normalising cross-correlation term
    #print(type(mA))
    #print(-np.abs(mA))
    correlation_FOM = -abs(
        mA
    )  # Negative absolute value allows for minimisation; FOM = figure of merit

    return (correlation_FOM)
Example 19
def get_iou(label_id, confusion):
    # true positives
    tp = np.longlong(confusion[label_id, label_id])
    # false negatives
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    # false positives
    not_ignored = [l for l in VALID_CLASS_IDS if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())

    denom = (tp + fp + fn)
    if denom == 0:  #I guess that happens if a class is not represented in the training data
        #return float('nan')
        return (0, 0, 0)
    return (float(tp) / denom, tp, denom)
Example 20
def cal_snr(before, after):
    x = np.longlong(0)
    y = np.longlong(0)
    length = len(before)
    for i in range(length):
        x += before[i]**2 / length  # divide inside the loop to limit overflow
        y += (before[i] - after[i])**2 / length  # limit overflow
    return 10 * np.log10(x / y)
Example 21
def get_iou(label_id, confusion):
    if not label_id in VALID_INSTANCE_ID:
        return (float('nan'), 0, 0)
    # #true positives
    tp = np.longlong(confusion[label_id, label_id])
    # #false negatives
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    # #false positives
    not_ignored = [l for l in VALID_INSTANCE_ID if not l == label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())

    denom = (tp + fp + fn)
    if denom == 0:
        return (float('nan'), 0, 0)
    return (float(tp) / denom, tp, denom)
Example 22
def eval_ious(mat):
    ious = np.zeros(3)
    for l in range(2):
        tp = np.longlong(mat[l, l])
        fn = np.longlong(mat[l, :].sum()) - tp

        notIgnored = [i for i in range(2) if not i == l]
        fp = np.longlong(mat[notIgnored, l].sum())
        denom = (tp + fp + fn)
        if denom == 0:
            print('error: denom is 0')
            ious[l] = float('nan')
            continue

        ious[l] = float(tp) / denom

    return ious[:-1]
Example 23
def generalized_eval_ious(mat):
    n = mat.shape[0]
    ious = np.zeros(n)
    for l in range(n):
        tp = np.longlong(mat[l, l])
        fn = np.longlong(mat[l, :].sum()) - tp

        notIgnored = [i for i in range(n) if not i == l]
        fp = np.longlong(mat[notIgnored, l].sum())
        denom = (tp + fp + fn)
        if denom == 0:
            print('error: denom is 0')
            ious[l] = float('nan')
            continue

        ious[l] = float(tp) / denom
    return ious
Example 24
 def __init__(self):
     self.tally_values = np.zeros([1])
     self.unc_values = np.zeros([1])
     self.xb = np.zeros([2])
     self.yb = np.zeros([2])
     self.zb = np.zeros([2])
     self.nps = np.longlong(0)
Example 25
def test_make_info():
    """Test some create_info properties."""
    n_ch = np.longlong(1)
    info = create_info(n_ch, 1000., 'eeg')
    assert set(info.keys()) == set(RAW_INFO_FIELDS)

    coil_types = {ch['coil_type'] for ch in info['chs']}
    assert FIFF.FIFFV_COIL_EEG in coil_types

    pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=['eeg', 'eeg'])
    pytest.raises(TypeError, create_info, ch_names=[np.array([1])],
                  sfreq=1000)
    pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=np.array([1]))
    pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types='awesome')
    pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000,
                  montage=np.array([1]))
    m = make_standard_montage('biosemi32')
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg')
    info.set_montage(m)
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    ch_pos_mon = m._get_ch_pos()
    ch_pos_mon = np.array(
        [ch_pos_mon[ch_name] for ch_name in info['ch_names']])
    # transform to head
    ch_pos_mon += (0., 0., 0.04014)
    assert_allclose(ch_pos, ch_pos_mon, atol=1e-5)
Example 26
def p1(z, x):
    # evaluate the polynomial sum(z[k] * x**k) at x, term by term
    k = 0
    s = np.longlong(0)

    for i in z:
        s += i * pow(x, k)
        k += 1
    return s
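A quick sanity check with hypothetical coefficients against numpy.polyval, which expects the coefficients in the opposite (highest-degree-first) order:

import numpy as np

coeffs = [2, 0, 3]  # p(x) = 2 + 0*x + 3*x**2
assert p1(coeffs, 4) == np.polyval(coeffs[::-1], 4) == 50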
Example 27
    def make_Ranging_Code_Table(self):

        # --- Find number of samples per spreading code ----------------------------
        samples_Per_Code = self.samples_Per_Code

        # --- Prepare the output matrix to speed up function -----------------------
        Ranging_Code_Table = np.zeros((37, samples_Per_Code))

        # --- Find time constants --------------------------------------------------
        ts = 1.0 / self.sampling_Freq

        tc = 1.0 / self.code_Freq_Basis

        # === For all satellite PRN-s ...
        for PRN in range(37):
            # --- Generate Ranging code for given PRN -----------------------------------
            Ranging_Code = self.generate_Ranging_Code(PRN)

            # --- Make index array to read Ranging code values -------------------------

            code_Value_Index = np.floor(ts * np.arange(0, samples_Per_Code) /
                                        tc)
            code_Value_Index = code_Value_Index.astype(np.longlong)

            code_Value_Index[-1] = 2045  # clamp the final sample to the last code chip

            Ranging_Code_Table[PRN] = Ranging_Code[code_Value_Index]

        return Ranging_Code_Table
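A toy illustration of the floor-index resampling used above, with made-up rates and a 4-chip code in place of the real 2046-chip ranging code:

import numpy as np

code = np.array([1, -1, 1, -1])           # hypothetical 4-chip code
ts, tc = 1.0 / 10.0, 1.0 / 4.0            # sample and chip periods (assumed)
idx = np.floor(ts * np.arange(10) / tc).astype(np.longlong)
idx[-1] = len(code) - 1                   # clamp the last sample, as with 2045 above
samples = code[idx]                       # idx = [0 0 0 1 1 2 2 2 3 3]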
Example 28
def rightNum(testX, testy, w, v):
    pred = np.dot(w, testX.T).T + np.longlong(
        np.sum((np.dot(testX, v)**2 - np.dot(testX**2, v**2)), axis=1).reshape(
            len(testX), 1)) / 2.0
    pred[pred >= 0] = 1
    pred[pred < 0] = 0
    return np.sum(pred == testy)
Example 29
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example 31
 def decode(self, code, a, flag):
     length = len(code)
     decode = list(range(length))
     decode[0] = np.longlong(code[0])
      # quantization-factor method
     if flag == 0:
         for i in range(length - 1):
             decode[i + 1] = np.longlong(decode[i]) + (code[i + 1] - 8) * a
     else:
         for i in range(length - 1):
              if code[i + 1] >= 8:  # negative difference
                  decode[i + 1] = decode[i] - math.exp(code[i + 1] -
                                                       8) + 1  # decoded value, fed back
              else:
                  decode[i + 1] = decode[i] + math.exp(
                      code[i + 1]) - 1  # decoded value, fed back
     return decode
Example 32
def history_to_json(fname, snapshot, id):
    """Generate json version of merger history."""

    id = np.longlong(id)
    meraxes.set_little_h(fname)

    gals = meraxes.read_gals(fname, snapshot, props=PROPS, quiet=True)
    snaplist, _, _ = meraxes.read_snaplist(fname)

    # get the ind of our start galaxy
    ind = np.where(gals['ID'] == id)[0]
    assert(len(ind) == 1)
    ind = ind[0]

    # read in the walk indices
    fp_ind = deque()
    np_ind = deque()
    for snap in tqdm(snaplist[:snapshot+1], desc="Reading indices"):
        try:
            fp_ind.append(meraxes.read_firstprogenitor_indices(fname, snap))
        except Exception:
            fp_ind.append([])
        try:
            np_ind.append(meraxes.read_nextprogenitor_indices(fname, snap))
        except Exception:
            np_ind.append([])

    # generate the graph (populates global variable `tree`)
    walk(snapshot, ind, fp_ind, np_ind)

    # attach the galaxies to the graph
    for snap in tqdm(snaplist[:snapshot+1], desc="Generating graph"):
        try:
            gal = meraxes.read_gals(fname, snapshot=snap, quiet=True,
                                    props=PROPS)
        except IndexError:
            continue
        for nid in tree.nodes_iter():
            node = tree.node[nid]
            gal_snap, gal_ind = [int(v) for v in nid.split('|')]
            if gal_snap == snap:
                add_galaxy_to_node(node, gal[gal_ind])

    # dump the tree to json
    data = json_graph.tree_data(tree, root=node_id(snapshot, ind),
                                attrs=dict(id='name', children='children'))
    fname_out = "tree_%09d.json" % id
    with open(fname_out, "wb") as fd:
        json.dump(data, fd)

    print("Conversion complete: %s" % fname_out)
Example 33
def test_make_info():
    """Test some create_info properties."""
    n_ch = np.longlong(1)
    info = create_info(n_ch, 1000., 'eeg')
    assert set(info.keys()) == set(RAW_INFO_FIELDS)

    coil_types = set([ch['coil_type'] for ch in info['chs']])
    assert FIFF.FIFFV_COIL_EEG in coil_types

    pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)
    pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=['eeg', 'eeg'])
    pytest.raises(TypeError, create_info, ch_names=[np.array([1])],
                  sfreq=1000)
    pytest.raises(TypeError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types=np.array([1]))
    pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
                  ch_types='awesome')
    pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000,
                  ch_types=None, montage=np.array([1]))
    m = read_montage('biosemi32')
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=m)
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)

    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
    d = read_dig_montage(hsp_fname, None, elp_fname, names, unit='m',
                         transform=False)
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=d)
    idents = [p['ident'] for p in info['dig']]
    assert FIFF.FIFFV_POINT_NASION in idents

    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=[d, m])
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)
    idents = [p['ident'] for p in info['dig']]
    assert (FIFF.FIFFV_POINT_NASION in idents)
    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
                       montage=[d, 'biosemi32'])
    ch_pos = [ch['loc'][:3] for ch in info['chs']]
    assert_array_equal(ch_pos, m.pos)
    idents = [p['ident'] for p in info['dig']]
    assert (FIFF.FIFFV_POINT_NASION in idents)
    assert info['meas_date'] is None
Example 34
def Import_MCNPX_output(filename,
                        DOSE_FACTOR=1.0, 
                        MESH_NUM=1,
                        VERBOSE=0):
    """ 
    ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    ;+
    ;AUTHOR: Justin Mikell, [email protected], justin[Dot][email protected]
    ;NAME: Import_MCNPX_output
    ;PURPOSE: to import a mesh tally (1D,2D, or 3D) from an MCNPX output file
    ;         
    ;INPUTS:        
    ;                
    ;CATEGORY: Monte Carlo, MCNPX
    ;CALLING SEQUENCE: resultStruct = Import_MCNPX_output(filename [,/VERBOSE]) 
    ;KEYWORDS:    
    ;           DOSE_FACTOR: if set will multiply the tally data by this factor (default value of 1.0)
    ;           /POS_VOLUME_OBJ: if set will return a pos volume object
    ;           /VERBOSE: if set will print out information to console
    ;           
    ;RESTRICTIONS:
    ;EXAMPLE:
    ;-
    ;MODIFICATION HISTORY:
    ;Created on 8-06-2010 by Justin Mikell
    ;;Added removal of extra white space 8-19-2010 JM
    ;;Added number of particles simulated to return struct 8-28-2010 JM
    ;TODO:  
    ;       -add support for multiple meshes
    ;       -add support for 1D
    ;       -add support 2D
    ;       
    ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    ; docformat = 'rst'
    ;+
    ; :Author: Justin Mikell
    ;           justin[Dot]mikell(AT]gmail.com
    ;
    ; :Description:
    ;    Describe the procedure.
    ;
    ; :Uses:
    ;            List the procedures/functions that this method calls here.
    ;
    ; :Params:
    ;    filename: in, optional, type=string
    ;    filename represents an MCNPX mesh tally outputfile with the following
    ;    format::
    ;    
    ;       line 1: comment
    ;       line 2: number_of_meshes number_of_particles_simulated
    ;         line 3: ignore particle_type?
    ;         line 4: 3rd integer is number of x boundaries (nx+1) 
    ;                 4th integer is number of y boundaries (ny+1)
    ;                 5th integer is number of z boundaries (nz+1)
    ;         line 5: ignore energy_range?
    ;         line 6: ignore
    ;       (repeat 3-6 for number of meshes)
    ;       line 8: all the xb's are on this line separated by white space (in cm)
    ;       line 9: all the yb's are on this line separated by white space (in cm)
    ;       line 10: all the zb's are on this line separated by white space (in cm) (could also be angles depending on mesh)
    ;       line 11 to line (ny*nz+10): nx*ny*nz values of output in MeV/(g*photon)
    ;               each column represents x, each line represents y, ny lines implies change in z and reset y.
    ;       line (ny*nz+11) to line (2*ny*nz+10): nx*ny*nz values of fractional uncertainty
    ;
    ; :Returns:  {Tally_xyz:values,Unc_xyz:values, xb:xb,yb:yb,zb:zb, nps:nps}
    ;      Tally_xyz is nx*ny*nz elements and represents the data stored at 
    ;      mesh(i,j,k).
    ;      Unc_xyz is nx*ny*nz elements and represents the fractional uncertainty 
    ;      at mesh(i,j,k). (e.g. 0.01 means 1%)
    ;      xb,yb,zb represent the voxel boundaries. (in cm) 
    ;      (The voxels may not be evenly spaced or the same size.)
    ;      nps is the number of particles simulated.
    ;
    ; :Categories:
    ;
    ; :Keywords:
    ;    DOSE_FACTOR
    ;    POS_VOLUME_OBJ
    ;    MESH_NUM
    ;    VERBOSE
    ;
    ; :Examples:
    ;         Please put an example::
    ;
    ;         ;example IDL code
    ;         ; output success
    ;
    ;     regular comment again
    ;
    ; :History:
    ;         Created by Justin Mikell on Sep 21, 2010
                Ported to python by Justin Mikell on July 21, 2016
                No POS_VOLUME_OBJ support yet.
    """ 
    f = open(filename, 'r')      
    data = f.read()
    f.close()
    mylines = data.splitlines()
    n_lines = len(mylines)
    print("Importing data from :{0}".format(filename))
    if(VERBOSE > 0):
       print("first lines of file preceding data are...")
       for i in np.arange(9):
          print("{0}:{1}".format(i,mylines[i]))
            
    print("Removing extra whitespace..")
    for i in np.arange(len(mylines)):
        mylines[i] = " ".join(mylines[i].split())
    if(VERBOSE > 0):
       print("first lines after removing extra whitespace...")
       for i in np.arange(9):
          print("{0}:'{1}'".format(i,mylines[i]))
  
    #get the number of particles simulated
    npsline = mylines[1].split()
    nps = np.double(npsline[1])
    total_meshes = np.uint(npsline[0])
    if (MESH_NUM > total_meshes):
        print("Warning mesh_num: {0} not found. Setting mesh_num=1".format(MESH_NUM))
        MESH_NUM = 1  # 1-based counting for mesh
    
    if(VERBOSE > 0):
        print("nps: {0}".format(nps))
        print("total_meshes: {0}".format(total_meshes))
    
    #read in all the mesh dimensions
    mesh_dimensions_dict = {}
    for i in np.arange(total_meshes):           
        boundsline = mylines[int(3+4*i)].split()
        if(VERBOSE > 0):
            print("reading bounds for mesh {0}".format(i))
            print("boundsline:{0}".format(boundsline))
        nx = int(boundsline[2])-1
        ny = int(boundsline[3])-1
        nz = int(boundsline[4])-1
        
        #this will help with 1D and 2D arrays
        if (nx == 0):
            nx = nx+1
        if (ny == 0):
            ny = ny+1
        if (nz == 0):
            nz = nz+1
        
        mesh_dimensions_dict[i] = [nx,ny,nz]
    
    #Each mesh consists of 3+2*ny*nz lines
    # 3 comes from xb,yb,zb
    # 2*nx*ny comes from the dose values and their uncertainties
    first_xb_line = 7+4*(total_meshes-1) #this is past all the "header" info
    xb_line = int(first_xb_line)
    print("xb_line:{0}".format(xb_line))
    for i in np.arange(total_meshes-1):
        xb_line = int(xb_line + 3 + 2*mesh_dimensions_dict[i][1]*mesh_dimensions_dict[i][2])
    
    print("xb_line:{0}".format(xb_line))
    #read in xbounds, ybounds, zbounds
    xb = np.array(mylines[xb_line].split(), dtype=float)
    yb = np.array(mylines[xb_line+1].split(), dtype=float)
    zb = np.array(mylines[xb_line+2].split(), dtype=float)
    
    #now set nx,ny,nz based on the mesh we have selected to read
    nx = mesh_dimensions_dict[MESH_NUM-1][0]
    ny = mesh_dimensions_dict[MESH_NUM-1][1]
    nz = mesh_dimensions_dict[MESH_NUM-1][2]
    
    #see if ny,nz agree with rest of file
    if (np.longlong(2)*ny*nz) != (n_lines-10):
        print("This may be a spherical/cylindrical mesh with an implicit 0.")
        if(yb[ny] == 180):
            print("This appears to be a spherical/cylindrical mesh file")
        ans = 'NA'
        while ( ans.upper() != 'Y' and ans.upper() != 'N'):
            print("Treat as spherical/cylindrical with implicit 0 in polar angle? [y/n]")
            ans = input('[y/n]')
        if ans.upper() == 'Y':
            yb = np.append(0, yb)
            ny = ny + 1
    
    #read in the tally values
    print("reading in dose values now...")
    tally_xyz = np.ndarray([nx,ny,nz], dtype=np.double)
    for k in np.arange(nz):
        for j in np.arange(ny):
            temp = np.array(mylines[int(ny*k+j+(xb_line+3))].split(), dtype=np.double) #data starts 3 lines after xb
            if(VERBOSE > 0):
                print("Line number: {0}".format(int(ny*k+j+(xb_line+4))))
                print("total elements on line: {0}".format(len(temp)))

            if(len(temp) != nx):
                print("number of imported elements doesn't match expected!!!")
                exit(1)
            tally_xyz[:,j,k] = temp
    
    #now read the uncertainty    
    print("reading in uncertainty values...")
    unc_xyz = np.ndarray([nx,ny,nz], dtype=np.double)
    for k in np.arange(nz):
        for j in np.arange(ny):
            temp = np.array(mylines[int(ny*k+j+ny*nz+(xb_line+3))].split(), dtype=np.double)
            if(VERBOSE > 0):
                print("Line number: {0}".format(int(ny*k+j+ny*nz+(xb_line+4))))
                print("total elements on line: {0}".format(len(temp)))

            if(len(temp) != nx):
                print("number of imported elements doesn't match expected!!!")
                exit(1)
            unc_xyz[:,j,k] = temp
    
    tally_xyz = tally_xyz*DOSE_FACTOR
    return({'tally_xyz':tally_xyz, 'unc_xyz':unc_xyz, 'xb':xb, 'yb':yb, 'zb':zb, 'nps':nps})
Example 35
 def import_from_mdata_ascii(self, filename):
     """ Example 
     o = jkcm_mcnpx_rmesh()
     filename = "/home/justin/001m"
     o.import_from_mdata_ascii(filename)
     """
     
     f = open(filename, 'r')        
     data = f.read()
     f.close()
     mylines = data.splitlines()
     #get nps associated with mdata
     self.nps = np.longlong(mylines[0].split()[5])
     
    
     #find the nx,ny,nz line
     p = re.compile('^f .*')
     j=0
     for i in np.arange(len(mylines)):
        if(p.match(mylines[i])):
            j=i
            break
    
     q = mylines[j].split()
     nxyz = np.longlong(q[1])
     nx = int(q[3])
     ny = int(q[4])
     nz = int(q[5])
    
     #allocate your arrays
     self.tally_values= np.zeros([nxyz])
     self.unc_values=np.zeros([nxyz])
     self.xb = np.zeros([nx+1])
     self.yb = np.zeros([ny+1])
     self.zb = np.zeros([nz+1])
    
     #read in xbounds
     t=0
     for i in np.arange((j+1), len(mylines)):
        t= t + len(mylines[i].split())
        if(t == (nx+1)):
            break
     temp=''
     for k in np.arange((j+1),i+1):
         temp += mylines[k]
     self.xb = np.array(temp.split(), dtype=np.double)
     print("min/max xb: {0},{1}".format(min(self.xb),max(self.xb)))
     #read in ybounds
     j=i
     t=0
     for i in np.arange((j+1), len(mylines)):
        t= t + len(mylines[i].split())
        if(t == (ny+1)):
            break
     temp=''
     for k in np.arange((j+1),i+1):
         temp += mylines[k]
     self.yb = np.array(temp.split(), dtype=np.double)
     print("min/max yb: {0},{1}".format(min(self.yb),max(self.yb)))
     #read in zbounds
     j=i
     t=0
     for i in np.arange((j+1), len(mylines)):
        t= t + len(mylines[i].split())
        if(t == (nz+1)):
            break
     temp=''
     for k in np.arange((j+1),i+1):
         temp += mylines[k]
     self.zb = np.array(temp.split(), dtype=np.double)
     print("min/max zb: {0},{1}".format(min(self.zb),max(self.zb)))
     
     #advance to tally values
     p = re.compile('^vals.*')
     j=0
     for i in np.arange(len(mylines)):
        if(p.match(mylines[i])):
            j=i
            break
     #read everything into an array
     tempArr = np.zeros([2*nxyz])
     si=0
     fi=0
     for i in np.arange((j+1),len(mylines)):
         temp = np.asarray(mylines[i].split(), dtype=np.double)
         fi = si + len(temp)
         tempArr[si:fi] = temp
         si = fi
     
     #separate absorbed dose and uncertainty
     self.unc_values = tempArr[1::2].reshape([nx,ny,nz],order='F')
     self.tally_values = tempArr[0::2].reshape([nx,ny,nz], order='F')
     #xc = 0.5*self.xb[0:-1] + 0.5*self.xb[1:]
     #e= np.reshape(np.repeat(xc,nz),[nx,nz])
     #e1=np.reshape(np.repeat(zc,nx), [nx,nz], order='F')
Example 36
pmc_to_longlong = _PMCIntegers.pmc_to_longlong

def longlong_to_pmc(*args):
  return _PMCIntegers.longlong_to_pmc(*args)
longlong_to_pmc = _PMCIntegers.longlong_to_pmc
try:
    import numpy

    RegisterPy2PMC(
        is_py = lambda x: type(x) is numpy.longlong,
        py2pmc = lambda x: longlong_to_pmc(int(x)),
    )

    RegisterPMC2Py(
        is_pmc = pmc_is_longlong,
        pmc2py = lambda x: numpy.longlong(pmc_to_longlong(x)),
    )
except ImportError: pass

import ctypes

RegisterPy2PMC(
    is_py = lambda x: type(x) is ctypes.c_longlong,
    py2pmc = lambda x: longlong_to_pmc(x.value),)

RegisterPMC2Py(
    is_pmc = pmc_is_longlong,
    pmc2py = lambda x: ctypes.c_longlong(pmc_to_longlong(x)),
)

Example 37
    def generate_bispectrum_matrix2(self,n=5,n_guess_bsp=int(1e6),verbose=False,bsp_mat='sparse'):
        ''' Calculates the matrix to convert from uv phases to bispectra.
        This version iterates through the sampling points in a vectorized way.
        It saves all of the triangles, then removes the duplicates every 'n'
        iterations. Reduce the number to save on ram but make it much slower.
        
        n_guess_bsp: guess the number of bispectra and pre-allocate the memory
        to save millions of 'append' calls (which was the slowest part). It must
        be large enough to contain all of the bispectra, or you will get an error.
        '''
        nbsp=self.nbuv*(self.nbuv-1)*(self.nbuv-2) // 6
        uv_to_bsp = np.zeros((n_guess_bsp,self.nbuv),dtype=int)
        bsp_u = np.zeros((n_guess_bsp,3)) # the u points of each bispectrum point
        bsp_v = np.zeros((n_guess_bsp,3)) # the v points of each bispectrum point
        already_done = np.zeros((n_guess_bsp),dtype=np.longlong) # to track the sets of uv points that have already been counted        
        bsp_ix=0
        uvrel=self.uvrel+np.transpose(self.uvrel)+1
        
        nbits=np.longlong(np.ceil(np.log(self.nbuv)/np.log(10)))
        
        print('Calculating bispectrum matrix. Will take a few minutes.')
        
        # Loop over the first pupil sampling point
        tstart=time.time()
        for ix1 in range(self.nbh):
            
            # Loop over the second pupil sampling point
            for ix2 in range(ix1+1,self.nbh):
                # Rather than a for loop, vectorize it!
                ix3s=np.arange(ix2+1,self.nbh)
                n_ix3s=ix3s.size
                
                if (bsp_ix+n_ix3s) > n_guess_bsp:
                    raise IndexError('Number of calculated bispectra exceeds the initial guess for the matrix size!')
                
                # Find the baseline indices
                b1_ix=uvrel[ix1,ix2]
                b2_ixs=uvrel[ix2,ix3s]
                b3_ixs=uvrel[ix1,ix3s] # we actually want the negative of this baseline
                b1_ixs=np.repeat(b1_ix,n_ix3s)
                
                # What uv points are these?
                uv1=self.uv[b1_ixs,:]
                uv2=self.uv[b2_ixs,:]
                uv3=self.uv[b3_ixs,:]
                    
                # Are they already in the array? (any permutation of these baselines is the same)
                # Convert to a single number to find out.
                bl_ixs=np.array([b1_ixs,b2_ixs,b3_ixs])
                bl_ixs=np.sort(bl_ixs,axis=0)
                these_triplet_nums=(10**(2*nbits))*bl_ixs[2,:]+ (10**nbits)*bl_ixs[1,:]+bl_ixs[0,:]
                
                # Just add them all and remove the duplicates later.
                already_done[bsp_ix:bsp_ix+n_ix3s]=these_triplet_nums
                    
                # add to all the arrays
                uv_to_bsp_line=np.zeros((n_ix3s,self.nbuv))
                diag=np.arange(n_ix3s)
                uv_to_bsp_line[diag,b1_ixs]+=1
                uv_to_bsp_line[diag,b2_ixs]+=1
                uv_to_bsp_line[diag,b3_ixs]+=-1
                uv_to_bsp[bsp_ix:bsp_ix+n_ix3s,:]=uv_to_bsp_line

                bsp_u[bsp_ix:bsp_ix+n_ix3s,:]=np.transpose(np.array([uv1[:,0],uv2[:,0],uv3[:,0]]))
                bsp_v[bsp_ix:bsp_ix+n_ix3s,:]=np.transpose(np.array([uv1[:,1],uv2[:,1],uv3[:,1]]))
                bsp_ix+=n_ix3s
                
            # remove the duplicates every n loops
            if (ix1 % n) == ((self.nbh-1) % n):
                # the (nbh-1 mod n) ensures we do this on the last iteration as well
                dummy,unique_ix=np.unique(already_done[0:bsp_ix+n_ix3s],return_index=True)
                bsp_ix=len(unique_ix)
                already_done[0:bsp_ix]=already_done[unique_ix]
                already_done[bsp_ix:]=0
                uv_to_bsp[0:bsp_ix]=uv_to_bsp[unique_ix]
                bsp_u[0:bsp_ix]=bsp_u[unique_ix]
                bsp_v[0:bsp_ix]=bsp_v[unique_ix]
                
                # Only print the status every 5*n iterations
                if (ix1 % (5*n)) == ((self.nbh-1) % n):
                    print('Done', ix1, 'of', self.nbh, '.', bsp_ix, 'bispectra found. Time taken:', np.round(time.time()-tstart, decimals=1), 'sec')
            
        print('Done. Total time taken:', np.round((time.time()-tstart)/60., decimals=1), 'mins')
        
        # Remove the excess parts of each array and attach them to the kpi.
        nbsp=bsp_ix
        self.already_done=already_done
        self.nbsp=bsp_ix
        self.uv_to_bsp=uv_to_bsp[0:bsp_ix]
        self.bsp_u=bsp_u[0:bsp_ix]
        self.bsp_v=bsp_v[0:bsp_ix]
        print('Found', nbsp, 'bispectra')
        t_start2 = time.time()

        tol = 1e-5

        try:
            if bsp_mat == 'sparse':
                print('Doing sparse svd')
                rank = np.linalg.matrix_rank(uv_to_bsp.astype('double'), tol=tol)
                print('Matrix rank:', rank)
                u, s, vt = svds(uv_to_bsp.astype('double').T, k=rank)

            elif bsp_mat == 'full':
                print('Attempting full svd')
                u, s, vt = np.linalg.svd(uv_to_bsp.astype('double').T, full_matrices=False)

                rank = np.sum(s > tol)
            sys.stdout.flush()

            self.uv_to_bsp_raw = np.copy(uv_to_bsp)

            self.uv_to_bsp = u.T
            self.nbsp = rank 
            self.bsp_s = s

            print('Reduced-rank bispectrum matrix calculated.')
            print('Matrix shape', self.uv_to_bsp.shape)
            print('Time taken:', np.round((time.time()-t_start2)/60., decimals=1), 'mins')

            if verbose:
                print(np.log(s))
                return s
        except Exception:
            print('SVD failed. Using raw matrix.')
            self.uv_to_bsp = uv_to_bsp
            self.nbsp = nbsp
        sys.stdout.flush()