Example #1
    def compute(self):
        """Estimate the per-pixel SNR in dB from the sample stack."""
        # np_mean / np_log10 are module-level aliases for numpy.mean / numpy.log10.
        # Sort along the last axis so the extremes are easy to slice.
        self.s.sort(axis=2)

        # signal amplitude: mean of the 25 strongest samples
        a_sig = np_mean(self.s[:, :, -25:], axis=2)
        # noise amplitude: mean of the weakest samples, skipping the minimum
        a_noise = np_mean(self.s[:, :, 1:52], axis=2)

        # amplitude ratio expressed in decibels
        self.snr = 20 * np_log10(a_sig / a_noise)
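
The same recipe works outside the class; a minimal standalone sketch, assuming only that the input is a 3-D numpy array (the window sizes mirror the snippet above):

import numpy as np

def snr_db(samples, n_sig=25, noise_slice=slice(1, 52)):
    """Sort along the last axis, then compare the strongest to the weakest samples."""
    s = np.sort(samples, axis=2)
    a_sig = np.mean(s[:, :, -n_sig:], axis=2)        # strongest samples
    a_noise = np.mean(s[:, :, noise_slice], axis=2)  # weakest samples, minimum skipped
    return 20 * np.log10(a_sig / a_noise)

snr = snr_db(np.abs(np.random.randn(4, 4, 200)) + 1e-6)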
Example #2
import matplotlib.pyplot as plt
from numpy import log10 as np_log10

def qirplot(qir, qiw, title, figfile=None):
    f, ax = plt.subplots(6, 2, sharex=True)
    f.set_size_inches(18.5, 10.5)
    f.suptitle(title)
    for ii in range(3):
        # filter magnitude responses in dB, one image row per filter
        ax[2 * ii, 0].imshow(20 * np_log10(qiw.f1[ii, :, :].transpose()),
                             cmap='gray')
        ax[2 * ii, 0].set_title('Freq1 - filter' + str(ii - 1))
        ax[2 * ii, 1].imshow(20 * np_log10(qiw.f2[ii, :, :].transpose()),
                             cmap='gray')
        ax[2 * ii, 1].set_title('Freq2 - filter' + str(ii - 1))

        # matching SNR traces on the row below each image pair
        ax[2 * ii + 1, 0].plot(qir.snr[ii, :])
        ax[2 * ii + 1, 1].plot(qir.snr[ii + 3, :])

    if figfile:
        f.savefig(figfile, dpi=100)
    else:
        f.show()
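
When a response contains zeros, 20 * log10 produces -inf pixels; flooring the magnitude first keeps the image finite. A minimal sketch with synthetic data (nothing here comes from the snippet):

import numpy as np
import matplotlib.pyplot as plt

resp = np.abs(np.random.randn(64, 64))            # synthetic magnitude response
resp_db = 20 * np.log10(np.maximum(resp, 1e-12))  # floor avoids log10(0) -> -inf

plt.imshow(resp_db, cmap='gray')
plt.colorbar(label='dB')
plt.show()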
Example #3
def check_if_same_space(fname_1, fname_2):
    """Return True if both images share the same qform, up to rounding."""
    from msct_image import Image
    # NB: these imports shadow the builtins min/all inside this function
    from numpy import min, nonzero, all, around
    from numpy import abs as np_abs
    from numpy import log10 as np_log10

    im_1 = Image(fname_1)
    im_2 = Image(fname_2)
    q1 = im_1.hdr.get_qform()
    q2 = im_2.hdr.get_qform()

    # pick a decimal precision from the smallest nonzero qform entry, capped at 4
    dec = int(np_abs(round(np_log10(min(np_abs(q1[nonzero(q1)]))))) + 1)
    dec = 4 if dec > 4 else dec
    return all(around(q1, dec) == around(q2, dec))
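
The precision heuristic itself needs nothing from msct_image; a self-contained sketch on two made-up 4x4 affines (the matrices and helper name are invented):

import numpy as np

def same_up_to_rounding(q1, q2):
    # decimals derived from the magnitude of the smallest nonzero entry, capped at 4
    dec = int(np.abs(round(np.log10(np.min(np.abs(q1[np.nonzero(q1)]))))) + 1)
    dec = min(dec, 4)
    return np.all(np.around(q1, dec) == np.around(q2, dec))

q1 = np.diag([0.5, 0.5, 2.0, 1.0])
q2 = q1 + 1e-9                      # differs only below the chosen precision
print(same_up_to_rounding(q1, q2))  # True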
Example #4
    def decrementViaRowIndex(self, rowIndex, point=None):
        """Wrapper to decrement about point"""
        if point is None:
            point = tuple(np_around(self.PM.transformedCP[rowIndex]))
        # weight the decrement by the log-scaled contig length
        multiplier = np_log10(self.PM.contigLengths[rowIndex])
        self.decrementAboutPoint(0, point[0], point[1], multiplier=multiplier)
        if self.numImgMaps > 1:
            self.decrementAboutPoint(1, self.PM.scaleFactor - point[2] - 1, point[1], multiplier=multiplier)
            self.decrementAboutPoint(
                2, self.PM.scaleFactor - point[2] - 1, self.PM.scaleFactor - point[0] - 1, multiplier=multiplier
            )
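
The log-length weighting is the only numerics in the wrapper; a standalone sketch of the same idea, accumulating log10-of-length weights into a small density map (the points and lengths are invented):

import numpy as np

scale = 8
density = np.zeros((scale, scale))
points = np.array([[2, 3], [2, 3], [5, 1]])     # invented (x, y) grid positions
lengths = np.array([1000.0, 50000.0, 2500.0])   # invented contig lengths

# np.add.at accumulates repeated indices; each weight is log10 of the length
np.add.at(density, (points[:, 0], points[:, 1]), np.log10(lengths))
print(density[2, 3])  # log10(1000) + log10(50000) = 3 + 4.699 = 7.699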
Example #5
    def findNewClusterCenters(self, ss=0):
        """Find a putative cluster"""

        inRange = lambda x, l, u: l <= x < u

        # we work from the top view as this has the base clustering
        max_index = np_argmax(self.blurredMaps[0])
        max_value = self.blurredMaps[0].ravel()[max_index]

        max_x = int(max_index / self.PM.scaleFactor)
        max_y = max_index - self.PM.scaleFactor * max_x
        max_z = -1

        ret_values = [max_value, max_x, max_y]

        start_span = int(1.5 * self.span)
        span_len = 2 * start_span + 1

        if self.debugPlots:
            self.plotRegion(max_x, max_y, max_z, fileName="Image_" + str(self.imageCounter), tag="column", column=True)
            self.imageCounter += 1

        # make a 3d grid to hold the values
        working_block = np_zeros((span_len, span_len, self.PM.scaleFactor))

        # go through the entire column
        (x_lower, x_upper) = self.makeCoordRanges(max_x, start_span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, start_span)
        super_putative_row_indices = []
        for p in self.im2RowIndicies:
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper):
                for row_index in self.im2RowIndicies[p]:
                    # check that the point is real and that it has not yet been binned
                    if row_index not in self.PM.binnedRowIndicies and row_index not in self.PM.restrictedRowIndicies:
                        # this is an unassigned point.
                        multiplier = np_log10(self.PM.contigLengths[row_index])
                        self.incrementAboutPoint3D(
                            working_block, p[0] - x_lower, p[1] - y_lower, p[2], multiplier=multiplier
                        )
                        super_putative_row_indices.append(row_index)

        # blur and find the highest value
        bwb = ndi.gaussian_filter(working_block, 8)  # blur radius hard-coded (was self.blurRadius)
        densest_index = np_unravel_index(np_argmax(bwb), np_shape(bwb))
        max_x = densest_index[0] + x_lower
        max_y = densest_index[1] + y_lower
        max_z = densest_index[2]

        # now get the basic color of this dense point
        putative_center_row_indices = []

        (x_lower, x_upper) = self.makeCoordRanges(max_x, self.span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, self.span)
        (z_lower, z_upper) = self.makeCoordRanges(max_z, 2 * self.span)

        for row_index in super_putative_row_indices:
            p = np_around(self.PM.transformedCP[row_index])
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper) and inRange(p[2], z_lower, z_upper):
                # we are within the range!
                putative_center_row_indices.append(row_index)

        # make sure we have something to go on here
        if np_size(putative_center_row_indices) == 0:
            # it's all over!
            return None

        if np_size(putative_center_row_indices) == 1:
            # get out of here but keep trying
            # the calling function may restrict these indices
            return [[np_array(putative_center_row_indices)], ret_values]
        else:
            total_BP = sum([self.PM.contigLengths[i] for i in putative_center_row_indices])
            if not self.isGoodBin(total_BP, len(putative_center_row_indices), ms=5):  # can we trust very small bins?
                # get out of here but keep trying
                # the calling function should restrict these indices
                return [[np_array(putative_center_row_indices)], ret_values]
            else:
                # we've got a few good guys here, partition them up!
                # shift these guys around a bit
                center_k_vals = np_array([self.PM.kmerVals[i] for i in putative_center_row_indices])
                k_partitions = self.partitionVals(center_k_vals)

                if len(k_partitions) == 0:
                    return None
                else:
                    center_c_vals = np_array([self.PM.transformedCP[i][-1] for i in putative_center_row_indices])
                    # center_c_vals = np_array([self.PM.averageCoverages[i] for i in putative_center_row_indices])
                    center_c_vals -= np_min(center_c_vals)
                    c_max = np_max(center_c_vals)
                    if c_max != 0:
                        center_c_vals /= c_max
                    c_partitions = self.partitionVals(center_c_vals)

                    # take the intersection of the two partitions
                    tmp_partition_hash_1 = {}
                    part_id = 1
                    for p in k_partitions:
                        for i in p:
                            tmp_partition_hash_1[i] = part_id
                        part_id += 1

                    tmp_partition_hash_2 = {}
                    part_id = 1
                    for p in c_partitions:
                        for i in p:
                            try:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], part_id)].append(i)
                            except KeyError:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], part_id)] = [i]
                        part_id += 1

                    partitions = [
                        np_array([putative_center_row_indices[i] for i in tmp_partition_hash_2[key]])
                        for key in tmp_partition_hash_2
                    ]

                    # pcs = [[self.PM.averageCoverages[i] for i in p] for p in partitions]
                    # print pcs
                    return [partitions, ret_values]
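
The intersection step above, splitting indices wherever either the k-mer or the coverage partitioning splits them, is easier to see in isolation. A sketch with made-up partitions (the helper name is invented):

import numpy as np

def intersect_partitions(part_a, part_b):
    """Group items that share a block in BOTH partitionings."""
    block_of_a = {i: n for n, block in enumerate(part_a) for i in block}
    groups = {}
    for n, block in enumerate(part_b):
        for i in block:
            groups.setdefault((block_of_a[i], n), []).append(i)
    return [np.array(g) for g in groups.values()]

# items 0..5: one partitioning by k-mer value, one by coverage (invented)
print(intersect_partitions([[0, 1, 2], [3, 4, 5]], [[0, 1], [2, 3], [4, 5]]))
# -> [array([0, 1]), array([2]), array([3]), array([4, 5])]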
Example #6
    def shuffleBAMs(self):
        """Make the data transformation deterministic by reordering the bams"""
        # first we should make a subset of the total data
        # we'd like to take it down to about 1500 or so RI's
        # but we'd like to do this in a repeatable way
        ideal_contig_num = 1500
        sub_cons = np_arange(len(self.indices))
        while len(sub_cons) > ideal_contig_num:
            # select every second contig when sorted by norm cov
            cov_sorted = np_argsort(self.normCoverages[sub_cons])
            sub_cons = np_array([
                sub_cons[cov_sorted[i * 2]]
                for i in np_arange(int(len(sub_cons) / 2))
            ])

            if len(sub_cons) > ideal_contig_num:
                # select every second contig when sorted by mer PC1
                mer_sorted = np_argsort(self.kmerNormPC1[sub_cons])
                sub_cons = np_array([
                    sub_cons[mer_sorted[i * 2]]
                    for i in np_arange(int(len(sub_cons) / 2))
                ])

        # now that we have a subset, calculate the distance between each of the untransformed vectors
        num_sc = len(sub_cons)

        # log shift the coverages towards the origin
        sub_covs = np_transpose([
            self.covProfiles[i] *
            (np_log10(self.normCoverages[i]) / self.normCoverages[i])
            for i in sub_cons
        ])
        sq_dists = cdist(sub_covs, sub_covs, 'cityblock')
        dists = squareform(sq_dists)

        # initialise a list of left, right neighbours
        lr_dict = {}
        for i in range(self.numStoits):
            lr_dict[i] = []

        too_big = 10000
        while True:
            closest = np_argmin(dists)
            if dists[closest] == too_big:
                break
            (i, j) = self.small2indices(closest, self.numStoits - 1)
            lr_dict[j].append(i)
            lr_dict[i].append(j)

            # mark these guys as neighbours
            if len(lr_dict[i]) == 2:
                # no more than 2 neighbours
                sq_dists[i, :] = too_big
                sq_dists[:, i] = too_big
                sq_dists[i, i] = 0.0
            if len(lr_dict[j]) == 2:
                # no more than 2 neighbours
                sq_dists[j, :] = too_big
                sq_dists[:, j] = too_big
                sq_dists[j, j] = 0.0

            # fix the dist matrix
            sq_dists[j, i] = too_big
            sq_dists[i, j] = too_big
            dists = squareform(sq_dists)

        # now make the ordering
        ordering = [0, lr_dict[0][0]]
        done = 2
        while done < self.numStoits:
            last = ordering[done - 1]
            if lr_dict[last][0] == ordering[done - 2]:
                ordering.append(lr_dict[last][1])
                last = lr_dict[last][1]
            else:
                ordering.append(lr_dict[last][0])
                last = lr_dict[last][0]
            done += 1

        # reorder the stoit columns to match `ordering`, one in-place swap at a time
        working = np_arange(self.numStoits)
        for i in range(1, self.numStoits):
            # where is this guy in the list
            loc = list(working).index(ordering[i])
            if loc != i:
                # swap the columns
                self.covProfiles[:, [i, loc]] = self.covProfiles[:, [loc, i]]
                self.stoitColNames[[i, loc]] = self.stoitColNames[[loc, i]]
                working[[i, loc]] = working[[loc, i]]
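
squareform does double duty above, mapping the square distance matrix to its condensed vector and back so np_argmin can scan all pairs at once. A minimal sketch of that round trip (random points, not BAM data):

import numpy as np
from scipy.spatial.distance import cdist, squareform

pts = np.random.rand(5, 3)
sq = cdist(pts, pts, 'cityblock')  # 5x5 symmetric, zero diagonal
cond = squareform(sq)              # condensed form: 10 pairwise distances

# recover (i, j) of the closest pair from the condensed index
rows, cols = np.triu_indices(5, k=1)
k = np.argmin(cond)
print(rows[k], cols[k], cond[k] == sq[rows[k], cols[k]])  # ... True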
Example #7

def hacer_CadenaConvolver(VIAS):

    print '\n# ------------------------------'
    print '# --------  CONVOLVER   --------'
    print '# ------------------------------'

    # EQ filtering:
    print '\n# --- EQ filtering:\n'
    for canal in CANALES:
        print 'filter "f_eq_' + canal + '" {'
        for inp in bf_ini.options("inputs"):
            if inp[-1].upper() == canal:
                print '    from_inputs:  "' + inp[:-1] + inp[-1].upper() + '";'
                print '    to_filters:   "f_drc_L", "f_drc_R" ;'
                print '    coeff:        "c_eq' + str(CANALES.index(canal)) + '";'
        print '};'

    # DRC filtering:
    print '\n# --- DRC filtering (both channels are received so MONO can be made):\n'
    for canal in CANALES:
        print 'filter "f_drc_' + canal + '" {'

        if canal == "L":
            print '    from_filters: "f_eq_L"//1, "f_eq_R"//0 ;'
        else:
            print '    from_filters: "f_eq_L"//0, "f_eq_R"//1 ;'

        # Now route the signal to the VIAS (ways) and to the SUBs, if any.
        # tmp1 collects this *channel*'s ways
        tmp1 = [via for via in VIAS if '_' + canal in via]
        tmp1 = [x[:-1] + x[-1].upper() for x in tmp1]
        # tmp2 collects any *subwoofers*
        tmp2 = [via for via in VIAS if via[:2] == 'sw']
        print '    to_filters:   "f_' + '", "f_'.join(tmp1 + tmp2) + '";'

        # we could use the first DRC:
        # print '    coeff:        "c_drc1_' + canal + '";'
        # or leave it flat, pending the FIRtro scripts:
        print '    coeff:        -1;'

        print '};'

    # XOVER filtering:
    print '\n# --- XOVER filtering:\n'

    # 1. Group by channel
    for canal in CANALES:

        # (A) WALK the VIAS (ways) declared in 'brutefir_config.ini' for this channel.
        # NB: a 'via' in brutefir_config.ini is vv_ch, i.e. way+channel ('viacha' below).
        #     Here we focus on one channel so the xover chain comes out in order.
        for via in [viacha[:2] for viacha in VIAS if canal == viacha[-1]]:
            gain, polarity, delay = bf_ini.get("outputs",
                                               via + '_' + canal).split()[1:]

            # (B) WALK the coeffs listed in 'filters.scan'
            #
            #     Optionally a .pcm may carry the channel in its name, in case a
            #     per-channel filter was designed for some way, for example:
            #
            #     [lp_xo]
            #     c_lp-lo1 = ...../lp-lo_woofer.pcm
            #     c_lp-hi1 = ...../lp-hi-L_tweeter.pcm
            #     c_lp-hi2 = ...../lp-hi-R_tweeter.pcm
            #     ... etc ...

            # lp filters take priority
            if filters_ini.options('lp_xo'):
                inipha = 'lp_xo'
            else:
                inipha = 'mp_xo'

            matched = ''
            pcmPrioritario = False
            for coeff in filters_ini.options(inipha):
                pcm = filters_ini.get(inipha, coeff).split("/")[-1]
                pcmvia = pcm[3:5]
                pcmcan = ''
                if pcm[5:7] == '-L':
                    pcmcan = 'L'
                if pcm[5:7] == '-R':
                    pcmcan = 'R'

                if pcmvia == via:
                    if pcmcan == canal:
                        pcmPrioritario = True
                        matched = coeff
                    # a channel-specific pcm wins even if a generic one also appears
                    elif not pcmcan and not pcmPrioritario:
                        matched = coeff

            print 'filter "f_' + via + "_" + canal + '" {'
            print '    from_filters: "f_drc_' + canal + '";'
            print '    to_outputs:   "' + via + '_' + canal + '"/' + gain + '/' + polarity + ';'

            if matched:
                print '    coeff:        "' + matched + '";'
            else:
                if via == 'fr':
                    print '    coeff:        -1;'
                else:
                    print '    coeff:        "COEFF NOT FOUND FOR THIS WAY";'
            print '};'

    # SUBWOOFER filtering
    if any(via for via in VIAS if via[:3] == 'sw_'):
        print '\n# --- SUB filtering:\n'

        # (A) WALK the subwoofer ways declared in 'brutefir_config.ini'
        for via in [x for x in VIAS if x[:2] == 'sw']:

            gain, polarity, delay = bf_ini.get("outputs", via).split()[1:]
            # mixing N channels into one sub calls for 10*log10(N) dB of attenuation
            mixAtt = '{:.2}'.format(-10 * np_log10(1.0 / len(CANALES)))

            # (B) WALK the coeffs listed in 'filters.scan'

            # lp filters take priority
            if filters_ini.options('lp_sw'):
                inipha = 'lp_sw'
            else:
                inipha = 'mp_sw'

            matched = ''
            for coeff in filters_ini.options(inipha):
                pcm = filters_ini.get(inipha, coeff).split("/")[-1]
                # examples   via:     sw_amr     pcm:      lp-sw-amr_60Hz.pcm
                #                               pcm:      lp-sw-amr.pcm
                #                               pcmvia:   sw-amr
                #           viaswID:    amr     pcmswID:     amr
                pcmvia = pcm[3:].replace('.pcm', '').split("_")[0]
                pcmswID = pcmvia.split("-")[-1]
                viaswID = via.split("_")[-1]

                if pcmswID == viaswID:
                    matched = coeff

            print 'filter "f_' + via + '" {'
            tmp = '"f_drc_' + ('"/' + mixAtt +
                               ', "f_drc_').join(CANALES) + '"/' + mixAtt + ';'
            print '    from_filters: ' + tmp
            print '    to_outputs:   "' + via + '"/' + gain + '/' + polarity + ';'
            if matched:
                print '    coeff:        "' + matched + '";'
            else:
                print '    coeff:        "COEFF NOT FOUND FOR THIS SUBWOOFER";'
            print '};'
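
The mixAtt line is the usual power rule: summing N equal channels raises the level by 10*log10(N) dB, so each branch is attenuated by that much. A quick check of the same expression:

from numpy import log10 as np_log10

for n_channels in (2, 3, 4):
    att_db = -10 * np_log10(1.0 / n_channels)  # same expression as mixAtt above
    print('%d channels -> %.2f dB' % (n_channels, att_db))
# 2 channels -> 3.01 dB, 3 channels -> 4.77 dB, 4 channels -> 6.02 dB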
Example #8

print 'Grid Z', Grid2_Z
# print 'Grid Rs (pc)', Grid2_logRs
# print 'Grid log(U)', Grid2_logU

nH          = 10.0 #cm^-3
c           = 29979245800.0 #cm/s
pc_to_cm    = 3.0856776e18 
markers     = ['o','s','^'] 


#Plot ionization parameters versus EqW
x_array = array([])
y_array = array([])
 
Sulfur_Ratio = (Flux_Frame.loc['S2_6716A'] + Flux_Frame.loc['S2_6731A']) / (Flux_Frame.loc['S3_9069A'] + Flux_Frame.loc['S3_9531A'])
obj_SIIbySIII = np_log10(Sulfur_Ratio.astype('float64'))
  
colors_list = ['#CC79A7', '#D55E00', '#bcbd22']
metals_list = [0.0001, 0.004, 0.02]
for i in range(len(metals_list)):
    Z = metals_list[i]
    indices = (Frame_MetalsEmission["Z"] == Z)
    x = np_log10((Frame_MetalsEmission.loc[indices, '[SII]6716'] + Frame_MetalsEmission.loc[indices, '[SII]6731'])
                 / (Frame_MetalsEmission.loc[indices, '[SIII]9069'] + Frame_MetalsEmission.loc[indices, '[SIII]9532']))
    y = Frame_MetalsEmission.loc[indices, 'logU']
    dz.data_plot(x, y, label='z = {Z}'.format(Z=Z), markerstyle=markers[i], color=colors_list[i])
    x_array = hstack([x_array, x])
    y_array = hstack([y_array, y])
     
# Linear model
lineal_mod          = LinearModel(prefix='lineal_')
Lineal_parameters   = lineal_mod.guess(y_array, x=x_array)
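
With starting values guessed, the fit itself is one more call. A self-contained sketch of the usual lmfit pattern (the demo data are invented; it assumes lmfit's LinearModel with its standard slope/intercept parameter names under the prefix):

import numpy as np
from lmfit.models import LinearModel

x_demo = np.linspace(0.0, 1.0, 20)                       # invented data
y_demo = 0.5 * x_demo - 2.0 + np.random.normal(0, 0.01, 20)

model = LinearModel(prefix='lineal_')
params = model.guess(y_demo, x=x_demo)                   # same guess step as above
result = model.fit(y_demo, params, x=x_demo)
print(result.params['lineal_slope'].value,               # ~0.5
      result.params['lineal_intercept'].value)           # ~-2.0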
Example #9
def r5_dnn_image(target_dirname,
                 chandat_obj=None,
                 chandat_dnn_obj=None,
                 is_saving_chandat_image=True):
    LOGGER.info('{}: r5: Turning chandat into upsampled envelope...'.format(
        target_dirname))
    if chandat_obj is None:
        chandat_obj = loadmat(os_path_join(target_dirname, CHANDAT_FNAME))
    f0 = chandat_obj['f0']
    if chandat_dnn_obj is None:
        chandat_dnn_obj = loadmat(
            os_path_join(target_dirname, CHANDAT_DNN_FNAME))
    chandat_dnn = chandat_dnn_obj['chandat_dnn']
    beam_position_x = chandat_dnn_obj['beam_position_x']
    depth = chandat_dnn_obj['depth']
    if f0.ndim == 2:
        f0 = f0[0, 0]

    rf_data = chandat_dnn.sum(axis=1)
    del chandat_dnn, chandat_dnn_obj['chandat_dnn']
    # design a 4th-order Butterworth bandpass; the design order is halved
    # because filtfilt below applies the filter twice
    n = 4
    order = n // 2
    critical_frequencies = [1e6, 9e6] / (4 * f0 / 2)  # normalised to Nyquist, with fs = 4*f0
    b, a = butter(order, critical_frequencies, btype='bandpass')

    # chandat_dnn = chandat_dnn.astype(float, copy=False) # REVIEW: necessary?

    rf_data_filt = filtfilt(b,
                            a,
                            rf_data,
                            axis=0,
                            padtype='odd',
                            padlen=3 * (max(len(b), len(a)) - 1))
    del a, b

    env = np_apply_along_axis(better_envelope, 0, rf_data_filt)
    # print('r5: env.shape =', env.shape)

    np_divide(env, env.max(), out=env)
    clip_to_eps(env)
    # np_clip(env, np_spacing(1), None, out=env)
    env_dB = np_zeros_like(env)
    np_log10(env, out=env_dB)
    np_multiply(env_dB, 20, out=env_dB)

    # Upscale lateral sampling
    up_scale = get_dict_from_file_json(
        os_path_join(
            target_dirname,
            TARGET_PARAMETERS_FNAME))[TARGET_PARAMETERS_KEY_SCALE_UPSAMPLE]
    up_scale_inverse = 1 / up_scale

    num_beams = env.shape[1]

    x = np_arange(1, num_beams + 1)

    new_x = np_arange(1, num_beams + up_scale_inverse, up_scale_inverse)

    # TODO: optimization: instead of doing this apply thing, can we pass in the
    #       whole `env` and specify axis?
    def curried_pchip(y):
        return pchip(x, y)(new_x)

    env_up = np_apply_along_axis(curried_pchip, 1, env)
    # print('r5: env_up.shape =', env_up.shape)
    del curried_pchip, new_x, x

    clip_to_eps(env_up)
    # np_clip(env_up, np_spacing(1), None, out=env_up)
    env_up_dB = np_zeros_like(env_up)
    np_log10(env_up, out=env_up_dB)
    np_multiply(env_up_dB, 20, out=env_up_dB)

    beam_position_x_up = np_linspace(beam_position_x.min(),
                                     beam_position_x.max(), env_up_dB.shape[1])  # pylint: disable=E1101, E1136
    del beam_position_x

    chandat_image_obj = {
        'rf_data': rf_data,
        'rf_data_filt': rf_data_filt,
        'env': env,
        'env_dB': env_dB,
        'envUp': env_up,
        'envUp_dB': env_up_dB,
        'beam_position_x_up': beam_position_x_up,
        'depth': depth,
    }

    if is_saving_chandat_image is True:
        chandat_image_path = os_path_join(target_dirname,
                                          CHANDAT_IMAGE_SAVE_FNAME)
        savemat(chandat_image_path, chandat_image_obj)

    LOGGER.info('{}: r5 Done'.format(target_dirname))
    return chandat_image_obj
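
The clip-then-log pattern above (clip_to_eps, then log10 and scale with out=) avoids both -inf values and extra temporaries. A standalone sketch with numpy only (clip_to_eps is approximated inline, since its body is not shown):

import numpy as np

env = np.abs(np.random.randn(128, 64))
env /= env.max()                            # normalise to [0, 1]
np.clip(env, np.spacing(1), None, out=env)  # eps floor, like clip_to_eps

env_dB = np.zeros_like(env)
np.log10(env, out=env_dB)                   # in place: no temporaries
np.multiply(env_dB, 20, out=env_dB)         # amplitude -> dB
print(env_dB.min(), env_dB.max())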
Example #10
                yield float(val)

Data_Dict = generate_variables()

#Declare number of levels (This is right for the He5.atom file)
Data_Dict['nlev'] = 5
manage_warnings('Atomic levels number', Data_Dict)

#Import the data from the data files:
import_atomData(Data_Dict)

#Convert from air to space wavelength
aircalc(Data_Dict)

#Convert density and temperature to log values
Data_Dict['Tg'] = np_log10(Data_Dict['Tg'])
Data_Dict['dg'] = np_log10(Data_Dict['dg'])

print Data_Dict['Tg']
print Data_Dict['dg']

#Declare physical conditions you want to treat
T       = 10000
den     = 100
tau3889 = 1.06

#Convert input data
Data_Dict['t4']         = T / 1e4
Data_Dict['den']        = den
Data_Dict['tau3889']    = tau3889
Data_Dict['denHe']      = Data_Dict['AHe'] * den
# print 'Grid Rs (pc)', Grid2_logRs
# print 'Grid log(U)', Grid2_logU

nH  = 10.0 #cm^-3
c   = 29979245800.0 #cm/s
pc_to_cm = 3.0856776e18 

print 


#Line intensity ratios for different metallicities 
x_array = array([])
y_array = array([])

#Adding a new column with the data we want
Frame_MetalsEmission['Q'] = np_log10(power(10, Frame_MetalsEmission['logU']) * 4 * pi * c * nH * power(Frame_MetalsEmission['logR'] * pc_to_cm, 2))

for Z in [0.008]:
    indices     = (Frame_MetalsEmission["Z"] == Z) & (Frame_MetalsEmission["M_Msun"] == 40000)
    tabulated   = Frame_MetalsEmission.loc[indices, 'logU'].values
    calculated  = power(10, Frame_MetalsEmission.loc[indices, 'logU']) * 4 * pi * c * nH * power(Frame_MetalsEmission.loc[indices, 'logR'] * pc_to_cm, 2)

    print 'ionizations', Frame_MetalsEmission.loc[indices, 'logU'].values
    print 'Q(H)', np_log10(calculated).values
    print 'Q(H) 2', Frame_MetalsEmission.loc[indices, 'Q'].values

index =   (Frame_MetalsEmission["Z"] == Z) & (Frame_MetalsEmission["M_Msun"] == 40000) & (Frame_MetalsEmission["t"] == 5.00)
print 'These values', Frame_MetalsEmission.loc[index, 'logU'].values, Frame_MetalsEmission.loc[index, 'logR'].values
print 'These values', Frame_MetalsEmission.loc[index, 'logU'].values[0], Frame_MetalsEmission.loc[index, 'logR'].values[0]
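
The Q(H) expression is the ionization-parameter definition rearranged, Q = U * 4*pi*R^2 * c * n_H. A worked check with the constants above (logU and R are invented sample values):

from numpy import log10 as np_log10, pi

nH, c, pc_to_cm = 10.0, 29979245800.0, 3.0856776e18

logU, R_pc = -2.5, 100.0                  # invented sample values
Q = 10**logU * 4 * pi * c * nH * (R_pc * pc_to_cm)**2
print('log Q(H) =', np_log10(Q))          # ~ 51.1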