Example no. 1
import math as m
import numpy as np


def EVal(A): # A is a 2x2 matrix
    
    #Return an appropriate error message if A is not 2x2
    if len(A)!=2 or len(A[0])!=2 or len(A[1])!=2:
        sol="Matrix A is not 2x2 - cannot determine eigenvalues"
    
    #Solve the characteristic equation
    else:
        a=1
        b=-A[0][0]-A[1][1]
        c=(A[0][0]*A[1][1])-(A[0][1]*A[1][0])
        dis=b**2-4*a*c
        if dis>=0:
            #Find a solution to (A-lambda*I)x=0 for both solutions
            #Normalize your solutions
            lambda1=(-b+np.sqrt(dis))/(2*a)
            lambda2=(-b-np.sqrt(dis))/(2*a)
            
            if A[0][0]-lambda1==0 and A[1][0]==0:
                v1=np.array([0,1])
            else:
                v1=np.array([-A[0][1]/(A[0][0]-lambda1),1])

            if A[0][0]-lambda2==0 and A[1][0]==0:
                v2=np.array([0,1])
            else:
                v2=np.array([-A[0][1]/(A[0][0]-lambda2),1])

            v1_mag=m.sqrt(v1[0]**2+v1[1]**2)
            v2_mag=m.sqrt(v2[0]**2+v2[1]**2)
            v1_norm=v1/v1_mag
            v2_norm=v2/v2_mag
            sol=[lambda1,v1_norm],[lambda2,v2_norm]
        
        else:
            re=-b/(2*a)
            im=(np.sqrt(-dis))/(2*a)
            lambda1=complex(re,im) #np.complex was removed from NumPy; use the builtin
            lambda2=complex(re,-im)

            if A[0][0]-lambda1==0 and A[1][0]==0:
                v1=np.array([0,1])
            else:
                v1=np.array([-A[0][1]/(A[0][0]-lambda1),1])

            if A[0][0]-lambda2==0 and A[1][0]==0:
                v2=np.array([0,1])
            else:
                v2=np.array([-A[0][1]/(A[0][0]-lambda2),1])

            sol=[lambda1,v1],[lambda2,v2]
    
    #Return the solution in the form [lambda_1,v_1],[lambda_2,v_2]
    return sol
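A minimal usage sketch, assuming the imports above: for the symmetric matrix [[2, 1], [1, 2]] the characteristic equation gives eigenvalues 3 and 1.

A = [[2, 1], [1, 2]]
for eigenvalue, eigenvector in EVal(A):
    print(eigenvalue, eigenvector)  # 3.0 and 1.0, each with a normalized eigenvector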
def intial_centroid(X, K):
    centroid_algorithm = input(
        'Which method should compute the initial centroids? (practice/random): '
    )
    if centroid_algorithm == 'random':
        centroid = np.zeros((K, X.shape[1]))
        for i in range(K):
            index = np.random.choice(range(X.shape[0]))
            centroid[i, :] = X[index, :]
    elif centroid_algorithm == 'practice':
        centroid = np.zeros((K, X.shape[1]))
        centroid[0, :] = np.array([0, 0])
        centroid[1, :] = np.array([10, 10])
    else:
        raise ValueError('unknown method: %s' % centroid_algorithm)
    return centroid
Example no. 3
def asColumnmatrix(X):
    if len(X) == 0:
        return np.array([])
    mat = np.empty((X[0].size, 0), dtype=X[0].dtype)
    for col in X:
        mat = np.hstack((mat, np.asarray(col).reshape(-1, 1)))
    return mat
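A small usage sketch, assuming numpy is imported as np: each input vector becomes one column of the result.

cols = [np.array([1, 2, 3]), np.array([4, 5, 6])]
print(asColumnmatrix(cols))
# [[1 4]
#  [2 5]
#  [3 6]]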
def condition(dens, ind, A):
    if (dens['bandwidth'].shape[1] > 2 * dens['N']):
        wNew1 = np.zeros(getNpts(dens))
        for i in range(0, getNpts(dens)):
            ktmp = kde(getPoints(dens, i), getBW(dens, i), 1, getType(dens))
            wNew1[i] = evaluate(marginal(ktmp, ind), A[ind], 0)
    else:
        bw = getBW(dens, 0)
        if (m_size(A)[1] > 1):  # temp fix
            wNew1 = evaluate(kde(A[ind, 0], bw[ind], 1, getType(dens)),
                             marginal(dens, ind), 0)
        else:
            if (A.size == 1):
                wNew1 = evaluate(kde(A, bw[ind], 1, getType(dens)),
                                 marginal(dens, ind), 0)
            else:
                wNew1 = evaluate(kde(A[ind, 0], bw[ind], 1, getType(dens)),
                                 marginal(dens, ind), 0)

    wNew = wNew1 * getWeights(dens)
    pts = getPoints(dens)

    if (dens['bandwidth'].shape[1] > 2 * dens['N']):
        bw = getBW(dens, np.arange(getNpts(dens)))
    else:
        bw = getBW(dens, 1)
    newInd = np.setdiff1d(np.arange(getDim(dens)), ind)
    pp = kde(pts[newInd, :], bw[newInd, :], wNew, getType(dens))
    return pp
Example no. 5
def make_coordinates(image, line_parameters):
    slope, intercept = line_parameters
    y1 = 300
    y2 = 120
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])
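A quick check of the expected output (the image argument is unused here, so None suffices): with slope 0.5 and intercept 60, the two y-levels map to x = 480 and x = 120.

print(make_coordinates(None, (0.5, 60)))  # -> [480 300 120 120]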
Example no. 6
def sample_subgoals(env, model, path_index, policy_loss, num_subgoals=3):
    cx = Variable(torch.zeros(1, model.lstm_size))
    hx = Variable(torch.zeros(1, model.lstm_size))
    path_state = env.ep.stats.path
    if len(env.ep.stats.path) - path_index > num_subgoals:
        subgoal_state_indices = np.random.choice(range(path_index + 1,
                                                       len(env.ep.stats.path)),
                                                 size=num_subgoals,
                                                 replace=False)
        subgoal_states = [
            env.ep.stats.path[index] for index in subgoal_state_indices
        ]
    else:
        return policy_loss
    subgoals = [state.hidden_state for state in subgoal_states]
    dist_to_subgoals = [(subgoal_index - (path_index))
                        for subgoal_index in subgoal_state_indices]
    for i in range(len(subgoals)):
        obs, action, reward = env.ep.stats.obs_action_Reward[
            subgoal_state_indices[i]]
        state_obs = obs[0][:env.str_len]
        subgoal = subgoals[i]
        if np.array_equal(state_obs, subgoal):
            continue
        h_d = re.EpState.get_h_d(state_obs, subgoal)
        obs_input = np.concatenate((state_obs, subgoal, np.array([h_d])))
        obs_input = torch.from_numpy(obs_input).float().unsqueeze(0)
        probs = policy(obs_input)
        m = Categorical(probs)
        prob = probs[0][action].item()
        action = torch.Tensor([action])
        log_prob = m.log_prob(action)
        reward = reward + her_coeff * (prob)**dist_to_subgoals[i]
        policy_loss = policy_loss - reward
    return policy_loss
Example no. 7
    def get_xyrz(self, data, key):
        # Get data

        if key in self.seed_keys:
            pos = LoadedData(data=self.data).get_seeds(key=key, idx=self.idx)
        elif key in self.streamer_keys:
            pos = self.dirty_get(self.data, key, self.idx)
        else:
            logger.error('Warning! Invalid key ({})'.format(key))
            return np.array([]), np.array([]), np.array([]), np.array([])

        x = pos[0, :]
        y = pos[1, :]
        z = pos[2, :]
        r = np.sqrt(pos[0, :]**2 + pos[1, :]**2)
        return x, y, r, z
Example no. 8
def generateValidData(batch_size, data=[]):
    while True:
        print('generateValidData...')
        valid_data = []
        valid_label = []
        batch = 0
        for i in range(len(data)):
            url = data[i]
            batch += 1
            img = load_img(filepath + '/trains/' + url)
            # print(img)
            valid_data.append(img)

            label = load_img(filepath + '/labels/' + url)
            # convert from 2-D to 3-D (rs could also read the data directly as 3-D)
            label = img_to_array(label)
            valid_label.append(label)

            # pack one batch and emit it with yield
            if batch % batch_size == 0:
                valid_data = np.array(valid_data)

                # process the label data: flatten, encode, one-hot, then reshape
                valid_label = np.array(valid_label).flatten()
                valid_label = labelencoder.transform(valid_label)
                valid_label = to_categorical(valid_label,
                                             num_classes=n_label)
                valid_label = valid_label.reshape(
                    (batch_size, img_w, img_h, n_label))
                yield (valid_data, valid_label)

                valid_data = []
                valid_label = []
Example no. 9
def test_condense(test, number):
    testresults = []
    for i in tqdm(range(np.size(test, 0) // number)):
        total = 0
        for j in range(number):
            total += test[i * number + j]
        testresults.append(total / number)
    return np.array(testresults)
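A usage sketch, assuming numpy and tqdm are imported: condensing six values in blocks of two averages each pair.

data = np.arange(6, dtype=float)  # [0. 1. 2. 3. 4. 5.]
print(test_condense(data, 2))     # -> [0.5 2.5 4.5]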
Example no. 10
def process_image_dsift(imagename, resultname, size=20, steps=10,
                        force_orientation=False, resize=None):
    """Process an image with densely sampled SIFT descriptors

       and then save the results to file.

       INPUT:
        imagename (string): Input image file location.

        resultname (string): Output image file location.

        size (int): Feature size.

        steps (int): Steps between locations.

        force_orientation (bool): If false all images are assumed oriented
                                  upwards.

        resize (tuple): Required image size.

       OUTPUT:
        Dense sift representation of image.
    """

    im = Image.open(imagename).convert('L')
    if resize is not None:
        im = im.resize(resize)
    m, n = im.size

    if imagename[-3:] != 'pgm':
        # create a pgm file
        im.save('tmp.pgm')
        imagename = 'tmp.pgm'

    # create frames and save to temporary file
    scale = size / 3.0
    x, y = np.meshgrid(range(steps, m, steps), range(steps, n, steps))
    xx, yy = x.flatten(), y.flatten()
    frame = np.array([xx, yy, scale * np.ones(xx.shape[0]),
                     np.zeros(xx.shape[0])])
    np.savetxt('tmp.frame', frame.T, fmt='%03.3f')

    if force_orientation:
        cmmd = str('sift ' + imagename + ' --output=' + resultname +
                   ' --read-frames=tmp.frame --orientations')
    else:
        cmmd = str('sift ' + imagename + ' --output=' + resultname +
                   ' --read-frames=tmp.frame')
    os.system(cmmd)
    print('processed', imagename, 'to', resultname)
Example no. 11
def __ortho__(v):
    """
    Orthogonal sample symmetry applied to a vector (v) in 3D:
    returns the eight symmetry-equivalent vectors.
    """
    v1 = np.array([v[0], v[1], v[2]])
    v2 = np.array([-v[0], v[1], v[2]])
    v3 = np.array([v[0], -v[1], v[2]])
    v4 = np.array([-v[0], -v[1], v[2]])

    v5 = v1.copy() * -1
    v6 = v2.copy() * -1
    v7 = v3.copy() * -1
    v8 = v4.copy() * -1

    return v1, v2, v3, v4, v5, v6, v7, v8
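A usage sketch, assuming numpy is imported as np: for a generic vector the eight sign combinations are all distinct.

equivalents = __ortho__(np.array([1.0, 2.0, 3.0]))
print(len(equivalents))  # 8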
Example no. 12
def ens_tau(pars=None, lmax_in=1535, ntest=10):
    global lmax
    lmax = lmax_in

    if pars is None:
        parr = np.arange(0.01, 0.1, 0.01)
    else:
        parr = np.array(pars)

    print('tau=', parr)

    dir = ('./ensemble_lmax%d_tau_' % lmax) + time.strftime('%Y-%m-%d_%X')
    if not os.path.isdir(dir):
        os.mkdir(dir)
        print(dir, 'has been created.')

    for par_in in parr:
        test_ensemble(pname='tau', par_in=par_in, ntest=ntest, dir=dir)
Example no. 13
import numpy as np


def softmax(x):
    e = np.exp(x - np.max(x))  # shift by the max for numerical stability
    return e / e.sum()
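A quick sanity check: the outputs are positive and sum to 1.

print(softmax(np.array([1.0, 2.0, 3.0])))  # [0.09003057 0.24472847 0.66524096]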

def loadGloveModel(gloveFile):
    print ("Loading Glove Model")
    f = open(gloveFile,'r',encoding='utf8')
    model = {}
    for line in f:
        splitLine = line.split()
        word = splitLine[0]
        embedding = [float(val) for val in splitLine[1:]]
        model[word] = embedding
    print ("Done.",len(model)," words loaded!")
    return model

model = loadGloveModel("utils/datasets/glove.6B.50d.txt")
frog = np.array(model["frog"])
cat = np.aray(model["cat"])
lion = np.array(model["lion"])

frog_lion = frog.T.dot(lion)
cat_lion = cat.T.dot(lion)


print("Frog and lion are closer", frog_lion)
print("Cat and lion are closer", cat_lion)


words = "I am talking about a pet."
average = np.zeros_like(np.array(model["dog"]))
count = 0
for w in words.split():
    if w.lower() in model.keys():
        average += np.array(model[w.lower()])
        count += 1

average = average / count  # divide by the number of matched words, not len(w)

predCat = average.T.dot(cat)
predCar = average.T.dot(np.array(model["car"]))

#probabilities
catProb = np.exp(predCat)/(np.exp(predCat)+np.exp(predCar))
carProb = np.exp(predCar)/(np.exp(predCat)+np.exp(predCar))

print("Cat: ",catProb,", Dog: ",carProb)
Example no. 14
def Check_Predictions(sql=False, num=25):
    
    from Webscraping import USER
    from MachineLearning import Model

    path = USER / r'Dropbox\ん'
    model = Model('deepdanbooru.hdf5')

    if sql:
        
        from Webscraping import CONNECT
        
        MYSQL = CONNECT()
        SELECT = f'''
            SELECT full_path(path), tags, type 
            FROM imagedata 
            WHERE SUBSTR(path, 32, 5) IN ('.jpg', '.png')
            ORDER BY RAND() LIMIT {num}
            '''

        for image, tags, type_ in MYSQL.execute(SELECT, fetch=1):

            tags = sorted(tags.split())
            image = path / image
            prediction = model.predict(image)
            similar = set(tags) & set(prediction)

    else:

        import cv2
        import numpy as np
        from PIL import Image
        from random import choices

        glob = list(path.glob('[0-9a-f]/[0-9a-f]/*jpg'))

        for image in choices(glob, k=num):

            prediction = model.predict(image)

            image_ = np.array(Image.open(image))
            image_ = cv2.cvtColor(image_, cv2.COLOR_RGB2BGR)
            cv2.imshow(prediction, image_)
            cv2.waitKey(0)
Example no. 15
def stack_one_step(q_mb_list, q_mf_list, gamma, env, evaluation_budget, treatment_budget, argmaxer,
                   bootstrap_weight_list, intercept=False):
  bootstrap_weight_correction_arr = compute_bootstrap_weight_correction(bootstrap_weight_list)
  y = np.hstack(env.y).astype(float)
  X = np.vstack(env.X)
  X_raw = np.vstack(env.X_raw)
  phi = np.zeros((0, 2))

  # Get targets and features (replicates of correction-weighted y's and X's)
  # (phi refers to q functions as features)
  for b, bootstrap_weight_correction_b in enumerate(bootstrap_weight_correction_arr):
    target_b = np.multiply(y, bootstrap_weight_correction_b)
    phi_b = np.column_stack((q_mb_list[b], q_mf_list[b]))
    phi = np.vstack((phi, phi_b))

  # Get targets (replicates of correction-weighted y's)
  target = np.array([np.multiply(y, bootstrap_correction_b.flatten()) for bootstrap_correction_b in
                     bootstrap_weight_correction_arr])
  target = target.flatten()

  # Get features (replicates of correction-weighted X's)
  features = np.array([np.multiply(X, bootstrap_correction_b.reshape(-1, 1))
                       for bootstrap_correction_b in bootstrap_weight_correction_arr])
Example no. 16
def estimate_cl(sky_map, lmax, binary_mask=None, beam_fwhm=0.0, pixwin=False):
    """
    Estimates the TT, EE, BB and TE auto/cross power spectrum.
    The sky map(s) will be masked by the binary mask provided.
    beam_fwhm input is in arcmins. It is converted internally to radians.
    If pixwin is True, the spectra is debeamed with the pixel window function.
    """
    if sky_map.ndim == 1:
        pol = False
    else:
        pol = True

    if binary_mask is not None:
        sky_map_masked = mu.mask_map(sky_map, binary_mask=binary_mask, pol=pol)
        f_sky = mu.get_sky_fraction(binary_mask)
    else:
        sky_map_masked = sky_map
        if hp.maptype(sky_map) == 0:
            f_sky = 1.0
        else:
            f_sky = np.array([1.0, 1.0, 1.0])

    Bl = hp.gauss_beam(fwhm=np.radians(beam_fwhm / 60.0), lmax=lmax, pol=pol)
    if pixwin:
        pixel_window = hp.pixwin(hp.get_nside(sky_map), pol)

    if pol:
        spectra = hp.anafast(sky_map_masked.filled(), lmax=lmax)[:4]
        spectra /= f_sky[1] * Bl.T**2
        if pixwin:
            spectra[0] /= pixel_window[0]**2
            spectra[1:] /= pixel_window[1]**2
    else:
        spectra = hp.anafast(sky_map_masked.filled(), lmax=lmax)
        spectra /= f_sky * Bl**2
        if pixwin:
            spectra /= pixel_window**2

    return spectra
Example no. 17
def auto_drive(neural, past_influence=0.2, frame_rate=10):
    """
    Function to drive automatically using the image from the camera.
    """
    # Initialise the car components
    cam = camera.Camera()
    cam.camera_init()
    mot = motor.Motor()

    # Create a variable that represents the previous decision
    prev_average = np.array([0, 0, 0, 0])
    # Then we enter a loop
    while True:
        start_time = time.time()
        # I/ The camera takes a picture
        image = cam.camera.get_image()
        formatted = image_load.format_single_image(image)
        # II/ Conversion through the line-detection system
        modified = image_modification.modify(formatted)
        # III/ The picture is fed to the convolutional neural network
        label = neural.predict(modified)[0]

        # IV/ Update the moving average
        choice = (1 - past_influence) * label + past_influence * prev_average
        prev_average = choice
        # V/ The wheels are driven according to the chosen direction
        label = image_load.cat_to_label(choice)
        if label == "F":
            mot.forward()
        elif label == "R":
            mot.right()
        elif label == "L":
            mot.left()
        elif label == "B":
            mot.backward()

        # If processing finished ahead of the frame period, wait out the rest
        wait(start_time, 1 / frame_rate)
Example no. 18
def np3dT(data):  # keep last axis the same
    """
    Make a tuple, list or numpy array at least a 3d numpy array and transposed first 2 axes.
    
    Args:
        :data: 
            | tuple, list, ndarray
        
    Returns:
        :returns: 
            | ndarray with .ndim >= 3 and with first two axes
            | transposed (the last axis is kept the same).
    """
    if isinstance(
            data, np.ndarray
    ):  # assume already atleast_3d when nd.array (user has to ensure input is an array)
        if (len(data.shape) >= 3):
            return data.transpose((1, 0, 2))
        else:
            return np.expand_dims(np.atleast_2d(data), axis=0).transpose(
                (1, 0, 2))
    else:
        return np.expand_dims(np.atleast_2d(np.array(data)), axis=0).transpose(
            (1, 0, 2))
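A small check of the list branch, assuming numpy is imported as np: a 2x2 nested list comes back with shape (2, 1, 2).

print(np3dT([[1, 2], [3, 4]]).shape)  # (2, 1, 2)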
from sklearn.preprocessing import MinMaxScaler
import numpy
weights = numpy.array([[115.], [140.], [175.]])  # values must be floats (append a decimal point to integers)
scaler = MinMaxScaler()
rescaled_weight = scaler.fit_transform(weights)
# fit_transform() performs two steps here:
# fit: find the min and max used by the scaling formula
# transform: rescale every value in the array
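For these weights the scaler maps the minimum (115) to 0, the maximum (175) to 1, and 140 to (140 - 115) / (175 - 115) ≈ 0.4167:

print(rescaled_weight)
# [[0.        ]
#  [0.41666667]
#  [1.        ]]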
import numpy as np

a=np.array([[1.0,2.0],[3.0,4.0]])
print(a)
# [[ 1.  2.]
#  [ 3.  4.]]
a.transpose()  # returns the transpose; does not modify a
# [[ 1.,  3.],
#  [ 2.,  4.]]
np.linalg.inv(a)  # inverse of the matrix
# [[-2. ,  1. ],
#  [ 1.5, -0.5]]
u=np.eye(2)  # 2x2 identity matrix
# [[ 1.,  0.],
#  [ 0.,  1.]]

j=np.array([[0.0,-1.0],[1.0,0.0]])
np.dot(j,j)  # matrix product
# ([[-1.,  0.],
#   [ 0., -1.]]
np.trace(u)  # returns the sum of the array's diagonal
#2
y=np.array([[5.],[7.]])
np.linalg.solve(a,y)  # solution of the linear matrix equation a·x = y
# [[-3.],
#  [ 4.]]
np.linalg.eig(j)  # eigenvalues and eigenvectors of a square array
# array([ 0.+1.j,  0.-1.j]), array([[ 0.70710678+0.j, 0.70710678-0.j],
# [ 0.00000000-0.70710678j,  0.00000000+0.70710678j]])
Example no. 21
def Uw():
    Uw = np.array([[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    return Uw
import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess

M = 1024
N = 1024
H = 512
fs = 44100

spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients=12, inputSize=N // 2 + 1)
x = ess.MonoLoader(filename='../sounds/speech-female.wav', sampleRate=fs)()
mfccs = []

for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
    mX = spectrum(window(frame))
    mfcc_bands, mfcc_coeffs = mfcc(mX)
    mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
Example no. 23
def ellipse(x_t, a=[1, 1], p=[1, 1]):
    Gamma = np.sum((x_t / np.array(a))**(2 * np.array(p)))
    return Gamma
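A usage sketch, assuming numpy is imported as np: with the default unit axes and powers, a point on the unit circle gives Gamma = 1.

print(ellipse(np.array([1.0, 0.0])))  # 1.0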
Example no. 24
def kmeans_pp(A, k, weighted=True, sparse=False, verbose=False):
    '''
    Returns $k$ initial centers based on the k-means++ initialization scheme.
    With weighted set to True, we have the standard algorithm. When weighted is
    set to False, instead of picking points based on the D^2 distribution, we
    pick the farthest point from the set (a greedy deterministic variant --
    sensitive to outlier points). Note that even this variant is not fully
    deterministic, since the first center is still chosen at random.

    A: nxd data matrix (sparse or dense). 
    k: is the number of clusters.

    Returns a (k x d) dense matrix.

    K-means ++
    ----------
     1. Choose one center uniformly at random among the data points.
     2. For each data point x, compute D(x), the distance between x and
        the nearest center that has already been chosen.
     3. Choose one new data point at random as a new center, using a
        weighted probability distribution where a point x is chosen with
        probability proportional to D(x)^2.
     4. Repeat Steps 2 and 3 until k centers have been chosen.
    '''
    n, d = A.shape
    if n <= k:
        if sparse:
            A = A.toarray()
        return np.array(A)
    index = np.random.choice(n)
    if sparse is True:
        B = np.squeeze(A[index].toarray())
        assert len(B) == d
        inits = [B]
    else:
        inits = [A[index]]
    indices = [index]
    t = [x for x in range(A.shape[0])]
    distance_matrix = distance_to_set(A, np.array(inits), sparse=sparse)
    distance_matrix = np.expand_dims(distance_matrix, axis=1)
    while len(inits) < k:
        if verbose:
            print('\rCenter: %3d/%4d' % (len(inits) + 1, k), end='')
        # Instead of using distance to set we can compute this incrementally.
        dx = np.min(distance_matrix, axis=1)
        assert dx.ndim == 1
        assert len(dx) == n
        dx = dx**2 / np.sum(dx**2)
        if weighted:
            choice = np.random.choice(t, 1, p=dx)[0]
        else:
            choice = np.argmax(dx)
        if choice in indices:
            continue
        if sparse:
            B = np.squeeze(A[choice].toarray())
            assert len(B) == d
        else:
            B = A[choice]
        inits.append(B)
        indices.append(choice)
        last_center = np.expand_dims(B, axis=0)
        assert last_center.ndim == 2
        assert last_center.shape[0] == 1
        assert last_center.shape[1] == d
        dx = distance_to_set(A, last_center, sparse=sparse)
        assert dx.ndim == 1
        assert len(dx) == n
        dx = np.expand_dims(dx, axis=1)
        a = [distance_matrix, dx]
        distance_matrix = np.concatenate(a, axis=1)
    if verbose:
        print()
    return np.array(inits)
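A usage sketch, assuming numpy is imported as np and that the distance_to_set helper used above is defined elsewhere in the module:

data = np.random.randn(100, 2)  # 100 points in 2-D
centers = kmeans_pp(data, 3)
print(centers.shape)            # (3, 2)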
 def __init__(self, parent=None):
     super(IV_Controller, self).__init__(parent)
     self.gpib_resource = None
     self.stop_measurement_early = False
     self.Voltage_Sweep_Func = lambda self, input_start, input_end, input_step, time_interval: (
         np.array([]), np.array([]))  # np.array() needs an explicit argument; return two empty arrays
Example no. 26
import numpy as np
from scipy import stats
from pylab import save
from pythonutils.odict import OrderedDict


####################################################################################################
# Define opportunities to commit CD and the reward sensitivity
####################################################################################################
opprtn = np.linspace(0,1,101)
nRwdPnsh = 12
# parameters of the genotype
rspnPrmExternalizing = np.vstack(([.3,.9,.2],[.7,.3,.4]))  # ,[1,.1,.1]
pExt = [k[0]*stats.norm.cdf(opprtn, k[1], k[2]) for k in rspnPrmExternalizing]
#sensitivity_to_Pns = [sp.stats.uniform.rvs(-.5, 1, size=nRwdPnsh), sp.stats.uniform.rvs(-.5, 1, size=nRwdPnsh)]
sensitivity_to_PnsY = [np.array([1, 1, 1.5, .8, 1, .9, .1, 0, .6, .9, 1.2, 1]),
                       np.array([.8, .8, .7, 1.4, 1.4, .3, .3, .4, .5, .5, .5, .4])]
sensitivity_to_PnsX = np.linspace(1,12,12)

#rspnPrmResCntrl = np.vstack(([.3,.9,.2],[.5,.3,.4],[1,.1,.1]))
#sensitivity_to_Rwrd = np.vstack(([.3,.9,.2],[.5,.3,.4],[1,.1,.1]));

# parameters of the social system
# pshmnt_Intensity = np.vstack(([.3,.9,.2],[.5,.3,.4],[1,.1,.1]))
# rwrd_Intensity = np.vstack(([.3,.9,.2],[.5,.3,.4],[1,.1,.1]))

freedom = np.array([.2, .9])
adaptability_pnshX = [np.linspace(1, 12, 12), np.linspace(1, 11, 6), np.array([3, 8, 10])]
adaptability_pnshY = [np.ones(12)/12, np.array([.2, .2, .1, .1, .2, .2]), np.array([.8, .1, .1])]
resp_coherence = [0, 1]
deg_monitor = [.2, .8]
Example no. 27
import numpy as np

a = np.arange(1, 20)  # the integers 1 through 19

print(a)
Example no. 28
import itertools

import numpy
import Orange


def table_to_XY(data):
    """Converts the Orange.Table data to pairs of input and output vectors
    (represented row-wise in two numpy.arrays X, Y)
    suitable to be used as a training/testing set for an artificial neural network.

    The attributes are created by the Table.to_numpy method. The class attribute(s)
    are transformed as follows:
        - each Continuous class attribute (regression) is assigned one output neuron
            (no scaling is performed on this step)
        - each Discrete class attribute (classification) is assigned one output neuron
            for each discrete value of this class. E.g. in the iris dataset
            (one discrete class attribute noting the name of the flower), we have
            3 neurons.
    """
    if not len(data):
        return numpy.array([]), numpy.array([])

    ## prepare the training data
    # classes

    cls_descriptors = list(filter(lambda desc: desc, [data.domain.class_var] + list(data.domain.class_vars)))

    def get_unfolder(descriptor):
        """Unfolds class variable into a number of output neurons' output """
        if isinstance(descriptor, Orange.feature.Continuous):
            def unfold(value):
                return [float(value)]

        elif isinstance(descriptor, Orange.feature.Discrete):
            def unfold(value):
                l = [-1.0] * len(descriptor.values)
                l[int(value)] = 1.0
                return l

        else:
            raise ValueError("Unsupported class variable type '%s'. Must be either Discrete or Continuous."%descriptor.var_type)

        return unfold

    unfolders = list(map(get_unfolder, cls_descriptors))  # materialize so it can be reused per instance

    def get_class_values(instance):
        l = []
        if data.domain.class_var:
            l = [instance.get_class()]
        return l + instance.get_classes()

    y = []

    # flatten([[0,0,0,1], [0.44], [1,0]]) =
    # [ 0, 0, 0, 1, 0.44, 1, 0 ]
    flatten = lambda it: list(itertools.chain.from_iterable(it))

    # multi_map([lambda x: x + 1, lambda x: x * 2], [0, 10]) =
    # [1, 20]
    multi_map = lambda Fs, Args : [ f(arg) for f, arg in zip(Fs,  Args) ]

    for instance in data:
        values = get_class_values(instance)
        y.append( flatten(multi_map( unfolders, values )) )

    # attributes
    X = data.to_numpy()[0]
    # classes
    Y = numpy.array(y)

    """
    print "X"
    for instance in data:
        print len(instance)
        print instance
    print "Y"
    print Y
    """
    return X, Y
import numpy as np
from sklearn import preprocessing, model_selection, neighbors  # cross_validation was renamed model_selection
import pandas as pd

df = pd.read_csv('breast-cancer.txt')
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)

X = np.array(df.drop(['class'], axis=1))
y = np.array(df['class'])

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.2)

clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)

accuracy = clf.score(X_test, y_test)
print(accuracy)

example_measures = np.array([4, 2, 1, 1, 1, 3, 2, 1])
example_measures = example_measures.reshape(1, -1)

prediction = clf.predict(example_measures)
print(prediction)
Example no. 30
 def position(self, p):
     self._position = np.array(p)
Example no. 31
import numpy as np
from scipy.stats import norm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
a = np.array([1, 2, 3])
print(type(a))
print(a.shape)
print(a[0], a[1], a[2])
a[0] = 5
print(".......")
print(norm.cdf(0))
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)

plt.plot(x, y)
plt.savefig("plt_test.png")

print("\nDone")