Example #1
class Model:
    FEATURE_SCALES = {"feature_l": 32, "feature_m": 16, "feature_s": 8}
    FEATURE_ORDER = ["feature_s", "feature_m", "feature_l"]
    # anchor box priors in pixels, smallest to largest (presumably (height, width) pairs)
    ANCHORS_PIXEL = np.array([[13, 10], [30, 16], [23, 33], [61, 30], [45, 62],
                              [119, 59], [90, 116], [198, 156], [326, 373]])
Example #2
def proc2_letnetiii():

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784])
    data, labels = mnist.train.next_batch(32)

    # next_batch returns flat 784-vectors; reshape each one into a 28x28 image
    data = np.array([img.reshape(28, 28) for img in data])
    print(data)
Example #3
def predict(self, X):  # predicts class labels
    X = self.build_matrix(X)
    if self.distribution in ['bernoulli', 'multinomial']:
        # H2O's binomial prediction frame is (predict, p0, p1); column 2 is P(class = 1)
        preds = self.model.predict(X).as_data_frame().as_matrix()[:, 2]
        preds = np.array([1 if pr >= 0.5 else 0 for pr in preds])
    else:
        preds = self.model.predict(X).as_data_frame().as_matrix()[:, 0]
    X = None
    gc.collect()
    return preds
Example #4
def generateDoctRank(W):

    doctRankings = []
    topics = W.shape[1]
    for topicIndex in range(topics):
        # copy the topic column; don't overwrite W, or later iterations would index a 1-D array
        topicWeights = np.array(W[:, topicIndex])
        topIndices = np.argsort(topicWeights)[::-1]  # highest-weight documents first
        doctRankings.append(topIndices)

    return doctRankings
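A quick check of the fixed ranking logic on a toy matrix (the values below are made up for illustration):

import numpy as np

W = np.array([[0.1, 0.9],
              [0.8, 0.2],
              [0.5, 0.4]])  # 3 documents x 2 topics
print(generateDoctRank(W))
# topic 0 ranks documents [1, 2, 0]; topic 1 ranks them [0, 2, 1]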
Example #5
def pipeline(theta, sigma, batch_size=1000, method='avo'):
    # repeat until delta uncertainty is below epsilon:
    #while delta > epsilon:
    for _ in range(10):
        # -- set initial thetas and priors from arguments
        x_gen_train = sample(theta, sigma, batch_size)
        x_gen_test = sample(theta, sigma, batch_size)
        # -- sample target data distribution N times
        x_data = sample(np.array([0.55, 2.0]), np.array([1.5, 0.5]), batch_size)
        # -- train discriminant (to convergence?) to distinguish samples
        d = train_discriminator(x_gen_train, x_data)
        yhat = d.predict(x_gen_test)
        # -- update parameters
        theta, sigma = update(theta, sigma, yhat, method)
    print(theta, sigma)
Example #6
def transformWorld2NormImageDist(self, X, Y, R, t, k):    # Zhang 3.3
    '''
    Inputs:
        X, Y: the world coordinates of the points for a given board, as arrays of 63 elements.
              X, Y come from genCornerCoordinates. Since the board is planar, Z is taken to be an array of zeros.
        R, t: the camera extrinsic parameters (rotation matrix and translation vector) for a given board.
        k: the radial distortion coefficients (k1, k2).
    Outputs:
        x_br, y_br: the real, normalized image coordinates
    '''
    ########## Code starts here ##########
    # undistorted normalized coordinates, with Z fixed at zero for the planar board
    x, y = self.transformWorld2NormImageUndist(X, Y, np.zeros_like(X), R, t)
    x = np.array(x)
    y = np.array(y)
    # Zhang's radial distortion model: x_d = x * (1 + k1*r^2 + k2*r^4), with r^2 = x^2 + y^2
    r2 = np.power(x, 2) + np.power(y, 2)
    radial = k[0] * r2 + k[1] * np.power(r2, 2)
    x_br = x + x * radial
    y_br = y + y * radial
    ########## Code ends here ##########
    return x_br, y_br
Example #7
    KL.append(
        scipy.stats.entropy(distribution_true, distribution_pred, base=None))

#%% selection of the top few important feature maps using ranked feature maps
k = 0
# find where the KL curve flattens: the first index whose 10-value window
# has a total absolute variation below 0.01
for i in range(len(KL)):
    win = KL[i:i + 10]
    abs_diff = np.sum(np.abs(np.diff(win)))
    if abs_diff < 0.01:
        final_index = i
        print(i)
        k = k + 1
        break

print('The top-l feature maps are:', final_index)

#%% (d) rank-based feature-map selection (geometrical method)

Th = 150  # threshold
os.chdir('/home/arshdeep/FILTER_SELECTION_WORK_JULY18/rank_based')
rnk = []
index = []
for i in range(np.shape(data_normalized)[2]):
    filter_i = data_normalized[:, :, i]
    rnk.append(np.linalg.matrix_rank(filter_i))
    if rnk[i] == Th:
        index.append(i)

sim_index = index
np.save(filename, np.array(sim_index))  # save the indices of the selected feature maps
Example #8
#!/usr/bin/python

import numpy as np
import cv2

# HSV lower/upper bounds per color (YELLOW and BLACK are unfilled placeholders)
COLOR_RANGES = {"BLUE": [np.array([110, 50, 50]), np.array([130, 255, 255])],
                "RED": [np.array([0, 50, 50]), np.array([20, 255, 255])],
                "YELLOW": [],
                "GREEN": [np.array([80, 50, 50]), np.array([100, 255, 255])],
                "BLACK": []}

SIZE = 1000

class ImageProcessor(object):

    def __init__(self, cv_image = None):
        self.cv_image = cv_image
        if cv_image is None:
            self.hsv_image = None
        else:
            self.hsv_image = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)

    def setImage(self, cv_image):
        self.cv_image = cv_image
        self.hsv_image = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)
    
    def findBlock(self, color):
        c_range = COLOR_RANGES.get(color,None)
        if c_range is None:
            print "Invalid Color"
            return None
Example #9
    def compute_additional_features(self):
        """
        Calculates additional features from the ECG using the results from
        extract_features().

        That is, the PR, QT, RR and QRS intervals duration and the QRS
        amplitude. The result is saved internally.
        """
        if not self.lead:
            return
        # find the first beat where the R, P and T peaks are all present (non-NaN)
        no_nans = False
        i = -1
        while not no_nans:
            i += 1
            if i >= len(self.waves_peaks["ECG_P_Peaks"]) - 1:
                no_nans = True
            features = np.array([
                self.r_peaks["ECG_R_Peaks"][i],
                self.waves_peaks["ECG_P_Peaks"][i],
                self.waves_peaks["ECG_T_Peaks"][i],
            ])
            if not np.isnan(features).any():
                no_nans = True

        # expand the per-segment sampling rates into one rate per R peak
        sampling_rate = np.array([])
        for i, k in enumerate(self.sampling_rate):
            if i == len(self.sampling_rate) - 1:
                length = len(self.r_peaks["ECG_R_Peaks"]) - len(sampling_rate)
            else:
                length = len(
                    np.array(self.r_peaks["ECG_R_Peaks"])
                    [(k[1] <= self.r_peaks["ECG_R_Peaks"])
                     & (self.r_peaks["ECG_R_Peaks"] < self.sampling_rate[i + 1]
                        [1])], )
            sampling_rate = np.append(sampling_rate, np.array([k[0]] * length))

        # Compute PR interval duration
        # Compute PR segment duration
        if self.waves_peaks["ECG_P_Peaks"][i] < self.r_peaks["ECG_R_Peaks"][i]:
            pr_interval = (
                np.array(self.waves_peaks["ECG_P_Onsets"]) -
                np.array(self.waves_peaks["ECG_R_Onsets"])) / sampling_rate
            pr_segment = (
                np.array(self.waves_peaks["ECG_P_Offsets"]) -
                np.array(self.waves_peaks["ECG_R_Onsets"])) / sampling_rate
        else:
            self.other_features["ECG_PR_Interval"].append(np.nan)
            self.other_features["ECG_PR_Segment"].append(np.nan)
            pr_interval = (np.array(self.waves_peaks["ECG_P_Onsets"][:-1]) -
                           np.array(self.waves_peaks["ECG_R_Onsets"][1:])
                           ) / sampling_rate[1:]
            pr_segment = (np.array(self.waves_peaks["ECG_P_Offsets"][:-1]) -
                          np.array(self.waves_peaks["ECG_R_Onsets"][1:])
                          ) / sampling_rate[1:]
        self.other_features["ECG_PR_Interval"].extend(pr_interval)
        self.other_features["ECG_PR_Segment"].extend(pr_segment)

        # Compute QT interval duration
        # Compute ST segment duration
        # Compute ST segment height
        r_offsets = np.array(self.waves_peaks["ECG_R_Offsets"])
        is_nan_r_offsets = np.isnan(r_offsets)
        no_nan_r_offsets = r_offsets[~is_nan_r_offsets]
        t_onsets = np.array(self.waves_peaks["ECG_T_Onsets"])
        is_nan_t_onsets = np.isnan(t_onsets)
        no_nan_t_onsets = t_onsets[~is_nan_t_onsets]

        ecg_values = ECG_TMAPS[f"{self.lead}_value"].tensor_from_file(
            ECG_TMAPS[f"{self.lead}_value"],
            self,
            visit=self.visit,
        )[0][np.append(no_nan_r_offsets, no_nan_t_onsets).astype(int)]

        ecg_r_offsets = ecg_values[:no_nan_r_offsets.shape[0]]
        index = np.where(is_nan_r_offsets)[0]
        index -= np.array(range(index.shape[0])).astype(int)
        ecg_r_offsets = np.append(ecg_r_offsets,
                                  np.array([np.nan] * index.shape[0]))
        ecg_r_offsets = np.insert(ecg_r_offsets, index,
                                  np.nan)[:r_offsets.shape[0]]

        ecg_t_onsets = ecg_values[no_nan_r_offsets.shape[0]:]
        index = np.where(is_nan_t_onsets)[0]
        index -= np.array(range(index.shape[0])).astype(int)
        ecg_t_onsets = np.append(ecg_t_onsets,
                                 np.array([np.nan] * index.shape[0]))
        ecg_t_onsets = np.insert(ecg_t_onsets, index,
                                 np.nan)[:t_onsets.shape[0]]

        if self.r_peaks["ECG_R_Peaks"][i] < self.waves_peaks["ECG_T_Peaks"][i]:
            qt_interval = (
                np.array(self.waves_peaks["ECG_R_Onsets"]) -
                np.array(self.waves_peaks["ECG_T_Offsets"])) / sampling_rate
            st_segment = (
                np.array(self.waves_peaks["ECG_R_Offsets"]) -
                np.array(self.waves_peaks["ECG_T_Onsets"])) / sampling_rate
            st_height = ecg_r_offsets - ecg_t_onsets
        else:
            self.other_features["ECG_QT_Interval"].append(np.nan)
            self.other_features["ECG_ST_Segment"].append(np.nan)
            self.other_features["ECG_ST_Height"].append(np.nan)
            qt_interval = (np.array(self.waves_peaks["ECG_R_Onsets"][:-1]) -
                           np.array(self.waves_peaks["ECG_T_Offsets"][1:])
                           ) / sampling_rate[:-1]
            st_segment = (np.array(self.waves_peaks["ECG_R_Offsets"][:-1]) -
                          np.array(self.waves_peaks["ECG_T_Onsets"][1:])
                          ) / sampling_rate[:-1]
            st_height = ecg_r_offsets[:-1] - ecg_t_onsets[1:]
        self.other_features["ECG_QT_Interval"].extend(qt_interval)
        self.other_features["ECG_ST_Segment"].extend(st_segment)
        self.other_features["ECG_ST_Height"].extend(list(st_height))

        # Compute TP segment duration
        if self.waves_peaks["ECG_P_Peaks"][i] < self.waves_peaks[
                "ECG_T_Peaks"][i]:
            self.other_features["ECG_TP_Segment"].append(np.nan)
            tp_segment = (np.array(self.waves_peaks["ECG_T_Offsets"][1:]) -
                          np.array(self.waves_peaks["ECG_P_Onsets"][:-1])
                          ) / sampling_rate[1:]
        else:
            tp_segment = (
                np.array(self.waves_peaks["ECG_T_Offsets"]) -
                np.array(self.waves_peaks["ECG_P_Onsets"])) / sampling_rate
        self.other_features["ECG_TP_Segment"].extend(tp_segment)

        # Compute RR interval duration
        rr_interval = (np.array(self.r_peaks["ECG_R_Peaks"][1:]) - np.array(
            self.r_peaks["ECG_R_Peaks"][:-1])) / sampling_rate[1:]
        self.other_features["ECG_RR_Interval"].extend(rr_interval)

        # Compute QRS interval duration
        qrs_interval = (np.array(self.waves_peaks["ECG_R_Offsets"]) - np.array(
            self.waves_peaks["ECG_R_Onsets"])) / sampling_rate
        self.other_features["ECG_QRS_Interval"].extend(qrs_interval)

        # Compute QRS amplitude
        r_peaks = np.array(self.r_peaks["ECG_R_Peaks"])
        is_nan_r_peaks = np.isnan(r_peaks)
        no_nan_r_peaks = r_peaks[~is_nan_r_peaks]
        q_peaks = np.array(self.waves_peaks["ECG_Q_Peaks"])
        is_nan_q_peaks = np.isnan(q_peaks)
        no_nan_q_peaks = q_peaks[~is_nan_q_peaks]
        s_peaks = np.array(self.waves_peaks["ECG_S_Peaks"])
        is_nan_s_peaks = np.isnan(s_peaks)
        no_nan_s_peaks = s_peaks[~is_nan_s_peaks]

        ecg_values = ECG_TMAPS[f"{self.lead}_value"].tensor_from_file(
            ECG_TMAPS[f"{self.lead}_value"],
            self,
            visit=self.visit,
        )[0][np.append(no_nan_r_peaks,
                       np.append(no_nan_q_peaks,
                                 no_nan_s_peaks)).astype(int, )]

        ecg_r_peaks = ecg_values[:no_nan_r_peaks.shape[0]]
        index = np.where(is_nan_r_peaks)[0]
        index -= np.array(range(index.shape[0])).astype(int)
        ecg_r_peaks = np.append(ecg_r_peaks,
                                np.array([np.nan] * index.shape[0]))
        ecg_r_peaks = np.insert(ecg_r_peaks, index, np.nan)[:r_peaks.shape[0]]

        ecg_q_peaks = ecg_values[no_nan_r_peaks.shape[0]:
                                 no_nan_r_peaks.shape[0] + no_nan_q_peaks.shape[0]]
        index = np.where(is_nan_q_peaks)[0]
        index -= np.array(range(index.shape[0])).astype(int)
        ecg_q_peaks = np.append(ecg_q_peaks,
                                np.array([np.nan] * index.shape[0]))
        ecg_q_peaks = np.insert(ecg_q_peaks, index, np.nan)[:q_peaks.shape[0]]

        ecg_s_peaks = ecg_values[no_nan_r_peaks.shape[0] +
                                 no_nan_q_peaks.shape[0]:]
        index = np.where(is_nan_s_peaks)[0]
        index -= np.array(range(index.shape[0])).astype(int)
        ecg_s_peaks = np.append(ecg_s_peaks,
                                np.array([np.nan] * index.shape[0]))
        ecg_s_peaks = np.insert(ecg_s_peaks, index, np.nan)[:s_peaks.shape[0]]

        qrs_amplitude = ecg_r_peaks - np.min(
            np.array([ecg_q_peaks, ecg_s_peaks]),
            axis=0,
        )
        self.other_features["ECG_QRS_Amplitude"].extend(list(qrs_amplitude))
Example #10
from anti_Helmholtz_eq import anti_H
from numpy import arange as arra
import numpy as np 
from matplotlib import pyplot as plt 
import math

I = 1000 #(A)
a = 0.07 #(m)
d = 0.09 #(m)
m0 = 4*math.pi*math.pow(10,-7)  # vacuum permeability mu_0 (T*m/A)

z = arra(-0.1, 0.11, 0.001)  # (m); arra is numpy.arange, imported above
B = np.empty(len(z))

for i in range(len(z)):
    B[i] = anti_H(I,z[i],a,d)   

plt.text(-7, -4, r'I = 1000A, a = 7cm, d = 9cm', {'color': 'b', 'fontsize': 10})
plt.title("anti_Helmholtz coil B_field") 
plt.xlabel("z-axis (cm)") 
plt.ylabel("B-field(G)") 
plt.plot(100*z, B)  # plot z in cm
plt.show()
Example #11
    )

    os.makedirs(os.path.join(save_path, "per_class_all_masks"), exist_ok=True)
    # os.makedirs(os.path.join(save_path, "per_mask_all_classes"), exist_ok=True)

    with torch.no_grad():
        for idx, (preds_batch, _) in enumerate(
                predictor.predict_data_mgr(dmgr, verbose=True)):
            gt = preds_batch["label_seg"]
            preds = preds_batch["pred_seg"][0]
            img = preds_batch["data_orig"][0]
            preds = np.argmax(preds, axis=0)

            img = np.concatenate([img, img, img]) * 255  # grayscale -> 3 channels
            # per-class binary masks for classes 0, 1 and 2
            gt = np.concatenate([gt == 0, gt == 1, gt == 2])
            preds = np.array([preds == 0, preds == 1, preds == 2])

            preds = (preds > thresh).astype(np.uint8) * 255
            gt = (gt > thresh).astype(np.uint8) * 255
            # draw_mask_overlay(image=img,
            #                   mask=gt,
            #                   least_squares=True
            #                   ).save(
            #     os.path.join(save_path, "per_mask_all_classes",
            #                  "image_%03d_gt.png" % idx))
            # draw_mask_overlay(image=img,
            #                   mask=preds,
            #                   least_squares=True
            #                   ).save(
            #     os.path.join(save_path, "per_mask_all_classes",
            #                  "image_%03d_pred.png" % idx))
Example #12
    b = '*' * 10
    if make(a) == b:
        print("True")
    else:
        print("False")
a = '*' * 5
func(a)
# Output: True

x = (0, 1, 2)
[a, b, c] = x
print(a + b + c)
# Output: 3

import numpy
arr = numpy.array([[1, 2, 3], [4, 5, 6]])
arr = arr.reshape(3, 2)
print(arr[1][1])
# Output: 4
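# reshape(3, 2) gives [[1, 2], [3, 4], [5, 6]], so arr[1][1] is 4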


def func(n):
    y = '*'.join(str(x) for x in range(1, n, 2))
    return eval(y)

print(func(7))
# Output: 15
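# range(1, 7, 2) yields 1, 3, 5, so y is '1*3*5' and eval(y) returns 15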

def func(x=0):
    n = 0
    n = n + 1
Example #13
model.fit(X_train, y_train)

# predict
y_pred = model.predict(X_test)

# The coefficients
print('Coefficients: \n', model.coef_)
# MSE and RMSE
err = mean_squared_error(y_test, y_pred)
print('MSE:', err)
rmse_err = np.sqrt(err)
print('RMSE:', round(rmse_err, 3))

############# Exercise 2 #################

X = np.array([1, 2, 4]).T
Y = np.array([2, 3, 6]).T

plt.axis([0, 5, 0, 8])  # set the x and y limits of the plot
plt.plot(X, Y, "ro", color="blue")  # "ro" draws circle markers; color= overrides the red with blue
plt.xlabel("Attribute value X")
plt.ylabel("Predicted value Y")
plt.show()

# define a function that calculates the linear-regression gradient descent


def LR2(X, Y, eta, lanlap, theta0, theta1):
    m = len(Y)
    theta00 = theta0
    theta11 = theta1
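The snippet is cut off here. A minimal sketch of how such a gradient-descent loop might continue, assuming eta is the learning rate and lanlap ('lần lặp') the iteration count; the completion below is an assumption, not the original code:

import numpy as np

def LR2_sketch(X, Y, eta, lanlap, theta0, theta1):
    # hypothetical completion: batch gradient descent for y = theta0 + theta1 * x
    m = len(Y)
    for _ in range(lanlap):
        pred = theta0 + theta1 * X          # current predictions
        grad0 = np.sum(pred - Y) / m        # dJ/dtheta0 for a mean-squared-error loss
        grad1 = np.sum((pred - Y) * X) / m  # dJ/dtheta1
        theta0 -= eta * grad0
        theta1 -= eta * grad1
    return theta0, theta1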
Example #14
def run_damage_recovery_experiments(results_path=FOLDER):
    policy_folder = results_path + '/policies/'
    os.makedirs(results_path + '/me_archive_adaptation', exist_ok=True)
    data_folder = results_path + '/me_archive_adaptation/'

    list_envs = damaged_ant_env_ids

    # extract the controllers and their stats into simple text files.
    if not DATA_ALREADY_EXTRACTED:
        thetas = []
        bcs = []
        perfs = []
        obs_mean = []
        obs_std = []
        policy_files = os.listdir(policy_folder)
        cell_ids = []
        for i, policy_file in enumerate(policy_files):
            cell_id = int(policy_file[:-5])  # strip the file extension (presumably '.json')
            cell_ids.append(cell_id)

            with open(policy_folder + policy_file, 'rt') as f:
                out = json.load(f)

            thetas.append(out['theta'])
            bcs.append(out['bc'])
            perfs.append(out['performance'])
            obs_mean.append(out['obs_mean'])
            obs_std.append(out['obs_std'])
            print(100 * i / len(policy_files), '%')

        thetas = np.array(thetas)
        bcs = np.array(bcs)
        perfs = np.array(perfs)
        obs_mean = np.array(obs_mean)
        obs_std = np.array(obs_std)
        cell_ids = np.array(cell_ids)
        np.savetxt(data_folder + '/bcs.txt', bcs)
        np.savetxt(data_folder + '/perfs.txt', perfs)
        np.savetxt(data_folder + '/thetas.txt', thetas)
        np.savetxt(data_folder + '/obs_mean.txt', obs_mean)
        np.savetxt(data_folder + '/obs_std.txt', obs_std)
        np.savetxt(data_folder + '/cell_ids.txt', cell_ids)
    else:
        # load data
        bcs = np.loadtxt(data_folder + '/bcs.txt')
        perfs = np.loadtxt(data_folder + '/perfs.txt')
        thetas = np.loadtxt(data_folder + '/thetas.txt')
        obs_mean = np.loadtxt(data_folder + '/obs_mean.txt')
        obs_std = np.loadtxt(data_folder + '/obs_std.txt')
        cell_ids = np.loadtxt(data_folder + '/cell_ids.txt')

    candidate_cell_ids = []
    candidate_perfs_after_damage = []
    candidate_perfs_before_damage = []
    best_perf_after_damage_all = []

    n_cells = thetas.shape[0]
    best_cell_id = np.argmax(perfs)
    best_perf = np.max(perfs)
    print('Best score on undamaged agent: {}, from cell id {}'.format(
        best_perf, best_cell_id))

    # loop over all damages defined in interaction.custom_gym.mujoco.test_adapted_envs
    config = CONFIG_DEFAULT
    for env_id in list_envs:
        print('\n\n\n\t\t\t', env_id, '\n\n')

        # build env with damage
        config.update(env_id=env_id)
        env = build_interaction_module(env_id=env_id, args=config)
        rs = np.random.RandomState()

        # run the Map-Based Bayesian Optimization Algorithm
        out = run_M_BOA_procedure(
            env,
            rs,
            bcs,
            perfs,
            thetas,
            obs_mean,
            obs_std,
            cell_ids,
            n_iterations=N_ITERATIONS,
            rho=RHO,  # kernel length-scale for the GP covariance matrix (larger = BCs influence each other further away)
            kappa=KAPPA,  # GP exploration parameter: picks argmax(predicted_perf + KAPPA * uncertainty)
            sigma2_noise=SIGMA2_NOISE,  # assumed noise on performance
            n_evals=N_EVALS,
            best_cell_id=best_cell_id,  # id of the best policy before damage, used as reference
        )
        best_perf_damaged, candidate_cell_id, candidate_perfs = out
        candidate_cell_ids.append(cell_ids[candidate_cell_id])
        candidate_perfs_after_damage.append(candidate_perfs)
        candidate_perfs_before_damage.append(perfs[candidate_cell_id])
        best_perf_after_damage_all.append(best_perf_damaged)

        np.savetxt(data_folder + 'candidate_ids.txt', candidate_cell_ids
                   )  # cell_id of the recovery policy for each of the damages
        np.savetxt(
            data_folder + 'candidate_perfs_afterdamage.txt',
            candidate_perfs_after_damage
        )  # performances of the recovery policies after damage (10 perf / damage)
        np.savetxt(
            data_folder + 'candidate_perfs_beforedamage.txt',
            candidate_perfs_before_damage
        )  # performances of the recovery policies before damage (10 perfs / damage)
        np.savetxt(data_folder + 'formerbest_id_and_perfs_beforedamage.txt',
                   [int(cell_ids[best_cell_id]), best_perf
                    ])  # performance of the former best policy before damage
        np.savetxt(data_folder + 'formerbest_perfs_afterdamage.txt',
                   best_perf_after_damage_all
                   )  # performance of the former best policy after damage.

        print('Performance of best policy', int(cell_ids[best_cell_id]),
              ' from the archive on undamaged robot: ', best_perf)
        print('Performance of best policy from the archive on damaged robot: ',
              np.mean(best_perf_damaged))
        print('Performance of candidate policy',
              int(cell_ids[candidate_cell_id]),
              ' from the archive on undamaged robot: ',
              perfs[candidate_cell_id])
        print(
            'Performance of candidate policy from the archive on damaged robot: ',
            np.mean(candidate_perfs))
Example #15
from F_scattering import F_scatt
import F_scattering
from numpy import arange as arra
import numpy as np 
from matplotlib import pyplot as plt 
import math

Gamma = 2*math.pi*6.0659*pow(10,6)  # natural linewidth (rad/s); 6.0659 MHz is the Rb-87 D2 line
l = 780.241209*pow(10,-9)  # wavelength (m) of the Rb-87 D2 transition
k = 2*math.pi/l  # wavenumber (rad/m)
N = 300
v = arra(-N, N, 1)  # velocity grid (m/s); arra is numpy.arange
F_plus = np.empty(2*N)
F_minus = np.empty(2*N)
F_molasses1 = np.empty(2*N)
F_molasses2 = np.empty(2*N)

detuning = -10*Gamma
detuningplus = detuning+k*v
detuningminus = detuning-k*v

Rabi_freq = pow(10,9)

for i in range(2*N):
    F_plus[i] = -F_scatt(detuning + k*v[i], Rabi_freq)   # scattering force from one beam (Doppler shift +k*v)
    F_minus[i] = F_scatt(detuning - k*v[i], Rabi_freq)   # counter-propagating beam (Doppler shift -k*v)
    F_molasses1[i] = F_minus[i] + F_plus[i]              # net optical-molasses force

detuning = -15*Gamma
detuningplus = detuning+k*v
detuningminus = detuning-k*v