Code Example #1
File: graph_uts.py Project: elcritch/scilab
    def set_labels(axes,
                   xx,
                   xp,
                   ax_dir='x',
                   side='bottom',
                   convertfunc=lambda x: 1.0 * x,
                   position=('outward', 40)):
        ax1twiny = axes.twiny() if ax_dir == 'x' else axes.twinx()

        Gax1twiny = lambda s: getattr(ax1twiny, s.format(x=ax_dir))
        Gaxes = lambda s: getattr(axes, s.format(x=ax_dir))

        oldaxvalues = np(Gaxes('get_{x}ticks')())
        oldbounds = np(Gaxes('get_{x}lim')())
        newbounds = convertfunc(oldbounds)
        # ax1Xlabels = np(Gax1twiny('get_{x}ticklabels')())
        # ax1Idxs = xx.searchsorted(ax1Xs)
        # ticks_cycles = np.linspace(newbounds[0], newbounds[-1], len(ax1Xs))
        ticks_cycles = convertfunc(oldaxvalues)
        # debug(ticks_cycles, oldbounds, newbounds, oldaxvalues)
        # debug(ax_dir, xx[::100], ax1Xs, ax1Idxs, ax1Xs.shape, xp.shape, ticks_cycles)

        Gax1twiny('set_{x}ticks')(ticks_cycles)
        Gax1twiny('set_{x}bound')(newbounds)
        Gax1twiny('set_{x}ticklabels')(
            ["{:.0f}".format(i) for i in ticks_cycles], rotation='vertical')
        Gax1twiny('set_{x}label')(labeler(xpl) + ' [%s]' % xp.units)

        Gax1twiny('set_frame_on')(True)
        Gax1twiny('patch').set_visible(False)
        Gax1twiny('{x}axis').set_ticks_position(side)
        Gax1twiny('{x}axis').set_label_position(side)
        Gax1twiny('spines')[side].set_position(position)
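The getattr-dispatch trick above (`Gax1twiny = lambda s: getattr(...)`) selects x- or y-axis methods by formatting the method name with the `ax_dir` string. A minimal standalone sketch of the pattern (names here are illustrative, not from the source):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax_dir = 'y'
G = lambda s: getattr(ax, s.format(x=ax_dir))
G('set_{x}label')('value')   # resolves to ax.set_ylabel('value')
print(G('get_{x}lim')())     # resolves to ax.get_ylim()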
Code Example #2
import pandas as pd
from scipy.interpolate import interp1d


def Y_He_control(eta, tau_n=879, Neff=3, xi_nu=0, steigman=False, Td=1):
    if steigman:
        df = pd.read_csv('n_over_p_Steigman.csv',
                         names=['x', 'y'],
                         delimiter=' ',
                         decimal=',')
        df['x'] = df['x'].div(1000)
        df['x'] = df['x'].apply(lambda row: 0.511 / row)
        df = df.set_index('x')
        # rebind the name 'np' to the interpolator; np(z) below evaluates the spline
        np = interp1d(df.index.to_numpy(), df['y'].to_numpy(), kind='cubic')
        Xn = lambda z: 1 / (1 + 1 / np(z))
        if Td == 1:
            T_d = T_BBN(eta, Xn, Neff, xi_nu)
        elif Td == 2:
            T_d = T_BBN_2(eta, Xn)
        else:
            T_d = Td
        Xn = 1 / (1 + 1 / np(0.511 / T_d))
        return 2 * Xn

    df = pd.read_csv('Y_He_Pdg.csv',
                     names=['eta', 'Y'],
                     delimiter=' ',
                     decimal=',')
    df = df.set_index('eta')
    Y_spl = interp1d(df.index.to_numpy(), df['Y'].to_numpy())
    return Y_spl(eta)
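This works because `np` is rebound to a scipy interpolator inside the function, so `np(z)` evaluates the spline rather than calling numpy. The same pattern in isolation (a sketch with made-up data):

import numpy
from scipy.interpolate import interp1d

xs = numpy.linspace(0.1, 10, 50)
np = interp1d(xs, 1.0 / xs, kind='cubic')  # 'np' is now a callable interpolator
print(np(2.5))                             # ~0.4, the interpolated 1/x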
Code Example #3
File: graph_uts.py Project: manasdas17/scilab-2
    def set_labels(axes, xx, xp, ax_dir='x', side='bottom',
                   convertfunc=lambda x: 1.0 * x, position=('outward', 40)):
        ax1twiny = axes.twiny() if ax_dir == 'x' else axes.twinx()

        Gax1twiny = lambda s: getattr(ax1twiny, s.format(x=ax_dir))
        Gaxes = lambda s: getattr(axes, s.format(x=ax_dir))

        oldaxvalues = np(Gaxes('get_{x}ticks')())
        oldbounds = np(Gaxes('get_{x}lim')())
        newbounds = convertfunc(oldbounds)
        # ax1Xlabels = np(Gax1twiny('get_{x}ticklabels')())
        # ax1Idxs = xx.searchsorted(ax1Xs)
        # ticks_cycles = np.linspace(newbounds[0], newbounds[-1], len(ax1Xs))
        ticks_cycles = convertfunc(oldaxvalues)
        # debug(ticks_cycles, oldbounds, newbounds, oldaxvalues)
        # debug(ax_dir, xx[::100], ax1Xs, ax1Idxs, ax1Xs.shape, xp.shape, ticks_cycles)

        Gax1twiny('set_{x}ticks')(ticks_cycles)
        Gax1twiny('set_{x}bound')(newbounds)
        Gax1twiny('set_{x}ticklabels')(["{:.0f}".format(i) for i in ticks_cycles],
                                       rotation='vertical')
        Gax1twiny('set_{x}label')(labeler(xpl) + ' [%s]' % xp.units)

        Gax1twiny('set_frame_on')(True)
        Gax1twiny('patch').set_visible(False)
        Gax1twiny('{x}axis').set_ticks_position(side)
        Gax1twiny('{x}axis').set_label_position(side)
        Gax1twiny('spines')[side].set_position(position)
Code Example #4
import math

def rssi(D):
    # Free-space signal-strength estimate (Friis form); the gains and
    # wavelength were blank in the source, so these are assumed placeholders.
    Pt = 1.35            # transmit power [W]
    Gt = 1.0             # transmit antenna gain (assumed)
    Gr = 1.0             # receive antenna gain (assumed)
    wave_length = 0.125  # wavelength [m] (assumed, ~2.4 GHz)
    Rs = (10 * math.log10(Gt * Gr * Pt)
          + 20 * math.log10(wave_length / (4 * math.pi))
          - 20 * math.log10(D))
    return Rs
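With the assumed constants above, rssi(10.0) evaluates to roughly 10·log10(1.35) + 20·log10(0.125 / 4π) − 20·log10(10) ≈ 1.3 − 40.0 − 20.0 ≈ −58.7 dB.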
Code Example #5
import numpy as np

def function10():
    # Subtract the 1d array brr from the 2d array arr, such that item i of
    # brr is subtracted from row i of arr (broadcast down the rows).
    arr = np.array([[3, 3, 3], [4, 4, 4], [5, 5, 5]])
    brr = np.array([1, 2, 3])
    subt = arr - brr[:, None]

    return subt

     """
Code Example #6
def get_prediction(image_path):
    image = tf.keras.preprocessing.image.load_img(
        image_path, target_size = (SIZE, SIZE)
    )
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
    image = np.expand_dims(image, axis=0)

    data = json.dumps({
        'instances':image.tolist()
    })
    response = requests.post(MODEL_URL, data=data.encode())
    result = json.loads(response.text)
    prediction = np(result['predictions'][0])
    class_name = CLASSES[int(prediction > 0.5)]
    return class_name
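Here `np(result['predictions'][0])` builds an array from the first prediction vector before thresholding. A tiny offline check of that last step, using plain numpy and a hypothetical server reply:

import numpy as np

result = {'predictions': [[0.93]]}              # hypothetical REST response body
prediction = np.array(result['predictions'][0])
print(int(prediction[0] > 0.5))                 # 1 -> would select CLASSES[1]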
Code Example #7
File: rnn_evaluation.py Project: marinamsm/Mask_RCNN
def generate_desc(model, tokenizer, photo, max_length):
    # seed the generation process
    in_text = 'startseq'
    # iterate over the whole length of the sequence
    for i in range(max_length):
        # integer encode input sequence
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        # pad input
        sequence = pad_sequences([sequence], maxlen=max_length)
        # predict next word
        yhat = model.predict([photo, sequence], verbose=0)
        # convert the probability distribution to the most likely word id
        yhat = np.argmax(yhat)
        # map integer to word
        word = word_for_id(yhat, tokenizer)
        # stop if we cannot map the word
        if word is None:
            break
        # append as input for generating the next word
        in_text += ' ' + word
        # stop if we predict the end of the sequence
        if word == 'endseq':
            break
    return in_text
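generate_desc assumes a word_for_id helper that inverts the tokenizer's word index; a commonly paired minimal implementation (an assumption, not shown in the source):

def word_for_id(integer, tokenizer):
    # map an integer id back to its word; None if the id is unknown
    for word, index in tokenizer.word_index.items():
        if index == integer:
            return word
    return None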
Code Example #8
File: test_np.py Project: k7hoven/np
 def test_mixed_values(self):
     self.assertIdenticalArray(np([1,2.3,4,5.6]), np.array([1,2.3,4,5.6]))
Code Example #9
import sys
import os

if sys.platform == 'linux':
    os.system('clear')
    print('Linux')

else:
    os.system('cls')
    print('Windows')

import numpy as np

# The source ended with a bare np(), which needs an argument; for example:
print(np.array([]))
Code Example #10
 def cast_to_np(self):
     self.captures = [np(capt) for capt in self.captures]
     return self.captures
Code Example #11
File: open_nii.py Project: leiluoray1/pytorch-3dunet
    fa = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_left_01_FA_ref.nii.gz".format(inv)).get_fdata()
    fa = np.transpose(fa, (2, 0, 1))  # swapaxes takes two axes; transpose moves the slice axis to the front

    edge = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_left_01_edgemap.nii.gz".format(inv)).get_fdata()
    edge = np.transpose(edge, (2, 0, 1))

    mp = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_left_01_MPRAGEPre.nii.gz".format(inv)).get_fdata()
    mp = np.transpose(mp, (2, 0, 1))

    t2 = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_left_01_T2.nii.gz".format(inv)).get_fdata()
    t2 = np.transpose(t2, (2, 0, 1))

    label = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_left_01_mask.nii.gz".format(inv)).get_fdata()
    label = np.transpose(label, (2, 0, 1))

    data_x = np([fa, edge, mp, t2])  # stack the four modalities into one (4, Z, H, W) array
    h5_file.create_dataset('raw', data=data_x, compression='gzip', compression_opts=4, dtype='uint8')
    h5_file.create_dataset('label', data=label, compression='gzip', compression_opts=1, dtype='uint8')
    h5_file.close()

    h5_file = h5py.File('/iacl/pg20/shangxian/Thalamus/training_TBI/h5/{}_right.h5'.format(inv), 'w')

    fa = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_right_01_FA.nii.gz".format(inv)).get_fdata()
    fa = np.transpose(fa, (2, 0, 1))

    edge = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_right_01_edgemap.nii.gz".format(inv)).get_fdata()
    edge = np.transpose(edge, (2, 0, 1))

    mp = nib.load("/iacl/pg20/shangxian/Thalamus/training_TBI/{}_right_01_MPRAGEPre.nii.gz".format(inv)).get_fdata()
    mp = np.transpose(mp, (2, 0, 1))
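np.transpose with (2, 0, 1) moves the slice axis in front of the in-plane axes; a quick shape check with a made-up volume:

import numpy as np

vol = np.zeros((256, 256, 60))             # hypothetical (H, W, slices) volume
print(np.transpose(vol, (2, 0, 1)).shape)  # (60, 256, 256)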
Code Example #12
File: affairs.py Project: dikshaa1702/ml
import numpy as np
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(
    features, labels, test_size=0.6, random_state=40)

from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
features_train = sc.fit_transform(features_train)
features_test = sc.transform(features_test)

from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(features_train, labels_train)

prob = classifier.predict_proba(features_test)

# Predicting the class labels
labels_pred = classifier.predict(features_test)

#creating confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, labels_pred)
print("confusion matrix:", cm)

score_log = classifier.score(features_test, labels_test)
print("accuracy score for logistic regression:", score_log)

prop_actual_affair = data['affair'].value_counts(normalize=True)[1]
print("proportion of women reporting an affair:", prop_actual_affair)
# Predicting a new observation: it must carry the same number of features the
# model was trained on, and pass through the fitted scaler. The zeros here are
# placeholder values, not data from the source.
new_obs = np.zeros((1, features_train.shape[1]))
labels_pred = classifier.predict(sc.transform(new_obs))
Code Example #13
File: test_np.py Project: pombredanne/np
 def test_2D(self):
     a2d = np.arange(12).reshape((3, 4))
     self.assertIdenticalArray(np(a2d), np.array(a2d))
Code Example #14
#!/usr/bin/python3

import numpy as np

a = np.array([])  # a bare np() needs an argument; an empty array as a minimal example
Code Example #15
File: test_np.py Project: pombredanne/np
 def test_mixed_values(self):
     self.assertIdenticalArray(np([1, 2.3, 4, 5.6]),
                               np.array([1, 2.3, 4, 5.6]))
Code Example #16
File: test_np.py Project: pombredanne/np
 def test_0D(self):
     self.assertIdenticalArray(np(3), np.array(3))
Code Example #17
def update(frame):
    xdata.append(frame)
    ydata.append(np.sin(frame))  # sample the curve at this frame
    ln.set_data(xdata, ydata)
    return ln,
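update has the shape of a matplotlib FuncAnimation callback; a minimal harness around it (a sketch following the standard FuncAnimation pattern, with ln, xdata, ydata as module globals):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1, 1)
ln, = ax.plot([], [], 'ro')
xdata, ydata = [], []

ani = FuncAnimation(fig, update, frames=np.linspace(0, 2 * np.pi, 128))
plt.show()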
Code Example #18
File: test_np.py Project: k7hoven/np
 def test_1D(self):
     self.assertIdenticalArray(np([1,3,4,5,6,9]), np.array([1,3,4,5,6,9]))
Code Example #19
File: test_np.py Project: k7hoven/np
 def test_0D(self):
     self.assertIdenticalArray(np(3), np.array(3))
Code Example #20
import numpy as np

# rows of unequal length cannot form a regular 2-D array;
# mark them as object (or make the rows equal length)
x = np.array([[1, 2, 3, 4], [5, 6, 7]], dtype=object)
print("x:\n{}".format(x))
Code Example #21
File: test_np.py Project: k7hoven/np
 def test_float_values(self):
     self.assertIdenticalArray(np([1.0, 2.0]), np.array([1.0,2.0]))
Code Example #22
File: test_np.py Project: pombredanne/np
 def test_1D(self):
     self.assertIdenticalArray(np([1, 3, 4, 5, 6, 9]),
                               np.array([1, 3, 4, 5, 6, 9]))
Code Example #23
File: test_np.py Project: k7hoven/np
 def test_2D(self):
     a2d = np.arange(12).reshape((3,4))
     self.assertIdenticalArray(np(a2d), np.array(a2d))                                  
Code Example #24
File: test_np.py Project: pombredanne/np
 def test_float_values(self):
     self.assertIdenticalArray(np([1.0, 2.0]), np.array([1.0, 2.0]))
Code Example #25
File: test_np.py Project: k7hoven/np
 def test_3D(self):
     a3d = np.arange(12).reshape((2,3,2))
     self.assertIdenticalArray(np(a3d), np.array(a3d))
Code Example #26
File: test_np.py Project: pombredanne/np
 def test_3D(self):
     a3d = np.arange(12).reshape((2, 3, 2))
     self.assertIdenticalArray(np(a3d), np.array(a3d))
Code Example #27
        outputs, states = model(inputs, states)  # output torch.Size([600, 10000])
        # cross-entropy one-hot encodes the integer targets internally
        loss = criterion(outputs, targets.reshape(-1))

        # Backward and optimize
        model.zero_grad()
        loss.backward()
        clip_grad_norm_(model.parameters(), 0.5)  # gradient clipping
        optimizer.step()

        step = (i + 1) // seq_length
        if step % 100 == 0:
            print(
                'Epoch [{}/{}], Step[{}/{}], Loss: {:.4f}, Perplexity: {:5.2f}'
                .format(epoch + 1, num_epochs, step, num_batches, loss.item(),
                        np.exp(loss.item())))

# Test the model
with torch.no_grad():
    with open('sample.txt', 'w') as f:
        # Set initial hidden and cell states
        state = (torch.zeros(num_layers, 1, hidden_size).to(device),
                 torch.zeros(num_layers, 1, hidden_size).to(device))

        # Select one word id randomly
        prob = torch.ones(vocab_size)
        input = torch.multinomial(prob, num_samples=1).unsqueeze(1).to(
            device)  # input torch.Size([1, 1])

        for i in range(num_samples):
            # Forward propagate RNN
Code Example #28
 def filter_array_by_market(tickers_arr):
     # keep only rows whose market column appears in the (external) filter list
     a = np.asarray(tickers_arr)
     mask = np.in1d(a[:, 1], filter)
     return a[mask]
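np.in1d (np.isin in current NumPy) builds the boolean row mask; a small check with hypothetical tickers:

import numpy as np

a = np.array([['AAPL', 'NASDAQ'], ['BP', 'LSE'], ['MSFT', 'NASDAQ']])
print(a[np.isin(a[:, 1], ['NASDAQ'])])   # keeps the AAPL and MSFT rows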
Code Example #29
File: mesh_hash.py Project: brendanaaa/Learnbgame
 def empty(size, dtype):
     # zero-filled array of the requested size and dtype
     return np.zeros(size, dtype=dtype)
Code Example #30
w = np.zeros([1, 3])

e = np.zeros(6)


def activationFunction(valor):
    if valor < 0.0:
        return (-1)
    else:
        return (1)


for j in range(numEpocas):
    for k in range(q):
        Xb = np.hstack((bias, x[:, k]))

        v = np.dot(w, Xb)

        Yr = activationFunction(v)

        e[k] = y[k] - Yr

        w = w + eta * e[k] * Xb

print("Vetor de erros (e) = " + str(e))

# The lines below assume a perceptron class and training data (peso, pH)
# defined elsewhere in the original project:
rede = np((peso, pH))
rede.treinar()

rede.teste([-0.79, -0.92])
rede.teste([-0.42, -0.09])
Code Example #31
File: IIR.py Project: StrangeCloud9/SWatchPad
    pl.plot(t[index] * f0, out[index])
    pl.title(u"频率扫描波测量的滤波器频谱")  # filter spectrum measured with a frequency sweep
    pl.ylim(100, 400)
    pl.xlim(15500, 16500)
    pl.ylabel(u"增益(dB)")  # gain (dB)
    pl.xlabel(u"频率(Hz)")  # frequency (Hz)

    pl.subplots_adjust(hspace=0.3)
    pl.show()


if __name__ == "__main__":
    t = np.arange(0, 2, 1 / 8000.0)
    sweep = signal.chirp(t, f0=0, t1=2, f1=4000.0)
    out = iirFilter(sweep, 1000.0, 3000.0, 8000.0, 40, 2)
    out = np(out)
    #index = np.where(np.logical_and(out[1:-1]>out[:-2],out[1:-1]>out[2:]))[0]+1
    #pl.plot(t[index]/2.0*4000,out[index])
    pl.plot(t / 2.0 * 4000, out)


def iirFilter(name, leftlimit, rightlimit, flag=0):
    wf = wave.open(name, "rb")
    nframes = wf.getnframes()
    framerate = wf.getframerate()
    print "framerate:", framerate
    str_data = wf.readframes(nframes)
    wf.close()
    wave_data = np.frombuffer(str_data, dtype=np.short)
    wave_data.shape = -1, 2
    wave_data = wave_data.T
Code Example #32
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.feature_selection import SelectKBest
from scipy.stats import pearsonr
from math import sqrt
filepath = r'E:\workspace\Dementia\Q_AD_DLB_VD_after_normalizion_0_to_1.csv'
dataset = pd.read_csv(filepath)
temp = dataset.copy()

## Load the class labels and count each diagnosis class
VD_count = 0
AD_count = 0
DLB_count = 0
for i in range(len(temp)):
    if temp.loc[i, 'Diagnosis'] == 'DLB':
        temp.loc[i, 'Diagnosis'] = 0
        DLB_count += 1
    elif temp.loc[i, 'Diagnosis'] == 'AD':
        temp.loc[i, 'Diagnosis'] = 1
        AD_count += 1
    else:
        temp.loc[i, 'Diagnosis'] = 2
        VD_count += 1

raw_data = temp.drop('Diagnosis', axis=1)
raw_labels = list(temp.loc[:, 'Diagnosis'])

def pearson_score(X, Y):
    # SelectKBest score function: per-feature |Pearson r| plus p-values
    rs, ps = zip(*(pearsonr(x, Y) for x in X.T))
    return np.abs(rs), ps

selected = SelectKBest(pearson_score, k=10).fit_transform(raw_data, raw_labels)
Code Example #33
    """
    m = len(x)
    x = np.copy(x)
    a = np.copy(y)
    for k in range(1, m):
        a[k:m] = (a[k:m] - a[k - 1]) / (x[k:m] - x[k - 1])

    return a


x = [0, 1, 2]
y = [-1, 1, 5]
opa = poly_newton_coefficient(x, y)


def newton_polynomial(x_data, y_data, x):
    """
    x_data: data points at x
    y_data: data points at y
    x: evaluation point(s)
    """
    a = poly_newton_coefficient(x_data, y_data)
    n = len(x_data) - 1  # Degree of polynomial
    p = a[n]
    for k in range(1, n + 1):
        p = a[n - k] + (x - x_data[n - k]) * p
    return p


print(newton_polynomial(x, y, np([1, 2, 3])))
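For the sample data x = [0, 1, 2], y = [-1, 1, 5] the divided differences come out as a = [-1, 2, 1], so the Newton form is p(x) = -1 + 2x + x(x - 1) = x^2 + x - 1, and the final print yields [ 1  5 11].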
Code Example #34
    return state2idx

def action2index(data: dict) -> dict:
    action2idx = {}
    index_of_action = 0
    for key, value in data.items():
        for action, tuples in data[key].items():
            if action not in action2idx:
                action2idx[action] = index_of_action
                index_of_action += 1
    return action2idx

def construct_state_action_state_matrix(data: dict, state2idx: dict, action2idx: dict):
    num_of_states = len(state2idx)
    num_of_actions = len(action2idx)
    transition_matrix = np.zeros((num_of_states, num_of_actions, num_of_states))
    Q_sa = np.zeros((num_of_states, num_of_actions, num_of_states))
    for state, dics in data.items():
        for action, tups in dics.items():
            for next_state, probs_reward in tups.items():
                curt_state_id = state2idx[state]
                next_state_id = state2idx[next_state]
                action_id = action2idx[action]
                transition_matrix[curt_state_id, action_id, next_state_id] += probs_reward[0]
                Q_sa[curt_state_id, action_id, next_state_id] += probs_reward[1]
    return (transition_matrix, Q_sa)

def construct_policy_matrix(policy: dict, state2idx:dict, action2idx:dict):
    current_policy = np.zeros((len(state2idx), len(action2idx)))
    for state, action_prob_dict in policy.items():
        for action, prob in action_prob_dict.items():
            current_policy[state2idx[state], action2idx[action]] += prob
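One way to use the arrays built above: the expected one-step reward per state under current_policy is a single einsum (a sketch; assumes Q_sa accumulates per-transition rewards as in construct_state_action_state_matrix):

import numpy as np

# shapes: current_policy (S, A), transition_matrix (S, A, S'), Q_sa (S, A, S')
r_pi = np.einsum('sa,sat,sat->s', current_policy, transition_matrix, Q_sa)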
Code Example #35
def add_pred_losses(gen_loss, net, snd, pr):
    if 'fg-bg' in pr.loss_types:
        gt = normalize_spec(snd.spec_parts[0], pr)
        pred = normalize_spec(net.pred_spec_fg, pr)

        if 'fg':
            diff = pred - gt
            loss = pr.l1_weight * tf.reduce_mean(tf.abs(diff))
            gen_loss.add_loss(loss, 'diff-fg')

            gt = normalize_phase(snd.phase_parts[0], pr)
            pred = normalize_phase(net.pred_phase_fg, pr)
            diff = pred - gt
            loss = pr.phase_weight * tf.reduce_mean(tf.abs(diff))
            gen_loss.add_loss(loss, 'phase-fg')

        if pr.predict_bg:
            gt = normalize_spec(snd.spec_parts[1], pr)
            pred = normalize_spec(net.pred_spec_bg, pr)
            diff = pred - gt
            loss = pr.l1_weight * tf.reduce_mean(tf.abs(diff))
            gen_loss.add_loss(loss, 'diff-bg')

            gt = normalize_phase(snd.phase_parts[1], pr)
            pred = normalize_phase(net.pred_phase_bg, pr)
            diff = pred - gt
            loss = pr.phase_weight * tf.reduce_mean(tf.abs(diff))
            gen_loss.add_loss(loss, 'phase-bg')

    if 'pit' in pr.loss_types:
        print('Using permutation loss')
        ns = lambda x: normalize_spec(x, pr)
        np = lambda x: normalize_phase(x, pr)
        gts_ = [[ns(snd.spec_parts[0]),
                 np(snd.phase_parts[0])],
                [ns(snd.spec_parts[1]),
                 np(snd.phase_parts[1])]]
        preds = [[ns(net.pred_spec_fg),
                  np(net.pred_phase_fg)],
                 [ns(net.pred_spec_bg),
                  np(net.pred_phase_bg)]]
        l1 = lambda x, y: tf.reduce_mean(tf.abs(x - y), [1, 2])
        losses = []
        for i in range(2):
            gt = [gts_[i % 2], gts_[(i + 1) % 2]]
            print('preds[0][0] shape =', shape(preds[0][0]))
            fg_spec = pr.l1_weight * l1(preds[0][0], gt[0][0])
            fg_phase = pr.phase_weight * l1(preds[0][1], gt[0][1])

            bg_spec = pr.l1_weight * l1(preds[1][0], gt[1][0])
            bg_phase = pr.phase_weight * l1(preds[1][1], gt[1][1])

            losses.append(fg_spec + fg_phase + bg_spec + bg_phase)
        losses = tf.concat([ed(x, 0) for x in losses], 0)
        print('losses shape =', shape(losses))
        loss_val = tf.reduce_min(losses, 0)
        print('losses shape after min =', shape(losses))
        loss_val = pr.pit_weight * tf.reduce_mean(loss_val)
        #loss_val = tf.Print(loss_val, [losses])

        gen_loss.add_loss(loss_val, 'pit')
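The 'pit' branch above is permutation-invariant training: score the prediction pair against both orderings of the ground-truth pair and keep the cheaper assignment. The same idea in a few lines of NumPy (a standalone sketch, not the source's TF graph):

import numpy as np

def pit_l1(preds, gts):
    # preds, gts: (2, T, F) arrays for the two sources
    identity = np.mean(np.abs(preds - gts))        # fg->fg, bg->bg
    swapped = np.mean(np.abs(preds - gts[::-1]))   # fg->bg, bg->fg
    return min(identity, swapped)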
Code Example #36
 def init(self):
     pretrained_weight = np()  # the source elides the weight data passed here
     self.item_embeds.weight.data.copy_(torch.from_numpy(pretrained_weight))
Code Example #37
File: hello.py Project: deepserket/hello
 def array(arr):
     return np(arr)