Example #1
    def crop_edges(self, x_left_border, x_right_border, y_bottom_border,
                   y_top_border):
        '''
        This function crops the edges of the data, i.e. it removes all the
        data outside the ranges specified by the user. This shouldn't
        need to be used again, as I have saved the cropped data as a
        FITS file, so it can be used freely.
        
        '''
        old_data = self.astro_flux
        y_counter = len(old_data) - 1
        x_counter = len(old_data[0]) - 1
        old_data = sp.asarray(old_data)

        while y_counter != -1:
            if not (y_bottom_border <= y_counter < y_top_border):
                #removes the whole row from a 2D array
                old_data = sp.delete(old_data, y_counter, 0)
            y_counter -= 1

        while x_counter != -1:
            if not (x_left_border <= x_counter < x_right_border):
                #removes the whole column from a 2D array
                old_data = sp.delete(old_data, x_counter, 1)
            x_counter -= 1

        self.astro_flux = old_data
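The same crop can also be done in one step with slicing, since the kept region is contiguous. A minimal standalone sketch (toy data, not from the original class), matching the half-open border checks above:

import numpy as np

data = np.arange(100).reshape(10, 10)
# keep rows 2..7 and columns 1..8, i.e. the half-open ranges [2, 8) and [1, 9)
cropped = data[2:8, 1:9]
assert cropped.shape == (6, 8)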
Example #2
def main(filename,metric,opts):
  reader = csv.reader(open(filename,'r'),delimiter=',')
  reader.next() # ignore first line
  header = reader.next()
  origModels = header[1:]
  students = origModels[:-4]
  if opts.useHC:
    models = list(origModels)
  else:
    models = list(students)

  results = numpy.zeros([len(students),len(models)])
  for i,row in enumerate(reader):
    if len(origModels) != len(row) - 1:
      print >>sys.stderr,'Bad Size:',len(origModels),len(row)-1
      sys.exit(2)
    for j,v in enumerate(row[1:len(models)+1]):
      results[i,j] = float(v)
  
  # get the arguments we want to call
  args = []
  args.append((results,models,opts.numStudentsToChoose,metric))
  for i,student in enumerate(students):
    tempResults = scipy.delete(results,i,0)
    tempResults = scipy.delete(tempResults,i,1)
    tempModels = list(models)
    del tempModels[i]
    args.append((tempResults,tempModels,opts.numStudentsToChoose,metric))
  if opts.multi:
    pool = Pool()
    res = pool.map(calcBestWrapper,args)
  else:
    res = map(calcBestWrapper,args)
  for student,(bestVal,bestInds,bestModels) in zip(['Overall'] + students,res):
    print '%s,%s' % (student,','.join(bestModels))
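The per-student loop above uses a leave-one-out pattern: drop student i from both the rows and the columns before calling calcBestWrapper. A minimal sketch of just that pattern, on a toy matrix:

import numpy as np

results = np.arange(16, dtype=float).reshape(4, 4)
i = 2
# remove row i and column i, as done for each student above
loo = np.delete(np.delete(results, i, 0), i, 1)
assert loo.shape == (3, 3)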
Example #3
  def scanSound(self, source, minnotel):
    binarized = source
    scale = 60. / self.wavetempo * (binarized[0].size / self.duration)
    noise_length = scale*minnotel

    antinoised = sp.zeros_like(binarized)

    for i in range(sp.shape(binarized)[0]):
      new_line = binarized[i, :].copy()
      diffed = sp.diff(new_line)
      ones_keys = sp.where(diffed == 1)[0]
      minus_keys = sp.where(diffed == -1)[0]
      
      if(ones_keys.size != 0 and minus_keys.size != 0):
        if(ones_keys[0] > minus_keys[0]):
          new_line = self.cutNoise(
              (0, minus_keys[0]), noise_length, new_line)
          minus_keys = sp.delete(minus_keys, 0)

        if(ones_keys[-1] > minus_keys[-1]):
          new_line = self.cutNoise(
              (ones_keys[-1], new_line.size-1), noise_length, new_line)
          ones_keys = sp.delete(ones_keys, -1)

        for j in range(sp.size(ones_keys)):
          new_line = self.cutNoise(
              (ones_keys[j], minus_keys[j]), noise_length, new_line)

        antinoised[i, :] = new_line

    return antinoised
Example #4
def calc_coh(subject, conditions, task, meg_electordes_names, meg_electrodes_data, tmin=0, tmax=2.5, sfreq=1000, fmin=55, fmax=110, bw=15, n_jobs=6):
    input_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_data_trials.mat')
    output_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_coh.npy')
    d = sio.loadmat(input_file)
    # Remove and sort the electrodes according to the meg_electordes_names
    electrodes = get_electrodes_names(subject, task)
    electrodes_to_remove = set(electrodes) - set(meg_electordes_names)
    indices_to_remove = [electrodes.index(e) for e in electrodes_to_remove]
    electrodes = scipy.delete(electrodes, indices_to_remove).tolist()
    electrodes_indices = np.array([electrodes.index(e) for e in meg_electordes_names])
    electrodes = np.array(electrodes)[electrodes_indices].tolist()
    assert(np.all(electrodes==meg_electordes_names))

    for cond, data in enumerate([d[conditions[0]], d[conditions[1]]]):
        data = scipy.delete(data, indices_to_remove, 1)
        data = data[:, electrodes_indices, :]
        data = downsample_data(data)
        data = data[:, :, :meg_electrodes_data.shape[2]]
        if cond == 0:
            coh_mat = np.zeros((data.shape[1], data.shape[1], 2))

        con_cnd, _, _, _, _ = spectral_connectivity(
            data, method='coh', mode='multitaper', sfreq=sfreq,
            fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=n_jobs, mt_bandwidth=bw, mt_low_bias=True,
            tmin=tmin, tmax=tmax)
        con_cnd = np.mean(con_cnd, axis=2)
        coh_mat[:, :, cond] = con_cnd
    np.save(output_file[:-4], coh_mat)
    return con_cnd
Example #5
    def _csv2m(self, csv_link):
        '''
        Import the csv as an array, clipping out strings for bars
        ...

        Arguments
        ---------
        csv_link        : str
                          Path to csv file to be converted into a map

        Returns
        -------
        m               : array
                          Array of floats to be plotted as map
        rows            : list
                          List of tuples (row, color) to locate horizontal bars
        cols            : list
                          List of tuples (col, color) to locate vertical bars    
        '''
        csv = [line.strip('\n').strip('\r').split(',') for line in open(csv_link).readlines()]
        a = np.array(csv)
        rows, cols = [], []
        for i, row in enumerate(a):
            color =  [row[0], row[-1]]
            if 'w' in color or 'b' in color:
                rows.append((i, color[0]))
        for i, col in enumerate(a.T):
            color =  [col[0], col[-1]]
            if 'w' in color or 'b' in color:
                cols.append((i, color[0]))
        m = scipy.delete(a, [i[0] for i in rows], 0)
        m = scipy.delete(m, [i[0] for i in cols], 1)
        return np.array(m, dtype=float), rows, cols
Example #6
    def generateNodesAdaptive(self):
        innerDomainSize = self.innerDomainSize
        innerMeshSize   = self.innerMeshSize
        numberElementsInnerDomain = innerDomainSize/innerMeshSize
        assert(numberElementsInnerDomain < self.numberElements)
        domainCenter = (self.domainStart+self.domainEnd)/2
        nodes0 = np.linspace(domainCenter,innerDomainSize/2.0,(numberElementsInnerDomain/2.0)+1.0)
        nodes0 = np.delete(nodes0,-1)
        numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain)/2.0
        const = np.log2(innerDomainSize/2.0)/0.5
        exp = np.linspace(const,np.log2(self.domainEnd*self.domainEnd),numberOuterIntervalsFromDomainCenter+1)
        nodes1 = np.power(np.sqrt(2),exp)
        nodesp = np.concatenate((nodes0,nodes1))
        nodesn = -nodesp[::-1]
        nodesn = np.delete(nodesn,-1)
        linNodalCoordinates = np.concatenate((nodesn,nodesp))
        nodalCoordinates = 0

        #Introduce higher order nodes
        if self.elementType == "quadratic" or self.elementType == "cubic":
           if self.elementType == "quadratic":
              numberNodesPerElement = 3 
           elif self.elementType == "cubic":
              numberNodesPerElement = 4

           for i in range(0,len(linNodalCoordinates)-1):
              newnodes = np.linspace(linNodalCoordinates[i],linNodalCoordinates[i+1],numberNodesPerElement)
              nodalCoordinates = np.delete(nodalCoordinates,-1)
              nodalCoordinates = np.concatenate((nodalCoordinates,newnodes))

        else:
           nodalCoordinates = linNodalCoordinates
    
        return nodalCoordinates
Example #7
 def filter_and_timetrack(signal):
     # this function prepares data for Lomb-Scargle and FFT periodograms - i.e. filtered cumulative sum of time,
     # filtered signal
     bad_beats = scipy.where(signal.annotation != 0)[0]
     filtered_timetrack = scipy.delete(signal.timetrack, bad_beats)
     filtered_signal = scipy.delete(signal.signal, bad_beats)
     return filtered_signal, filtered_timetrack
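A minimal usage sketch, assuming the function above is in scope, an older scipy whose numpy aliases (scipy.where, scipy.delete, scipy.array) still exist, and a hypothetical stand-in for the signal object:

from collections import namedtuple
import scipy

Signal = namedtuple('Signal', ['signal', 'timetrack', 'annotation'])  # hypothetical stand-in
sig = Signal(signal=scipy.array([800., 810., 1500., 805.]),
             timetrack=scipy.array([800., 1610., 3110., 3915.]),
             annotation=scipy.array([0, 0, 1, 0]))  # nonzero marks a bad beat
filtered_signal, filtered_timetrack = filter_and_timetrack(sig)
# the beat annotated as bad is dropped from both the signal and the timetrack
assert filtered_signal.size == filtered_timetrack.size == 3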
Example #8
    def gstamp(self, ports_v, time=0, reduced=True):
        """Returns the differential (trans)conductance wrt the port specified by port_index
        when the element has the voltages specified in ports_v across its ports,
        at (simulation) time.

        ports_v: a list in the form: [voltage_across_port0, voltage_across_port1, ...]
        port_index: an integer, 0 <= port_index < len(self.get_ports())
        time: the simulation time at which the evaluation is performed. Set it to
        None during DC analysis.
        """
        indices = ([self.n1 - 1]*2 + [self.n2 - 1]*2,
                   [self.n1 - 1, self.n2 - 1]*2)
        gm = self.model.get_gm(self.model, 0, utilities.tuplinator(ports_v), 0, self.device)
        if gm == 0:
            gm = options.gmin*2
        stamp = np.array(((gm, -gm),
                          (-gm, gm)), dtype=np.float64)
        if reduced:
            zap_rc = [pos for pos, i in enumerate(indices[1][:2]) if i == -1]
            stamp = np.delete(stamp, zap_rc, axis=0)
            stamp = np.delete(stamp, zap_rc, axis=1)
            indices = tuple(zip(*[(i, y) for i, y in zip(*indices) if (i != -1 and y != -1)]))
            stamp_flat = stamp.reshape(-1)
            stamp_folded = []
            indices_folded = []
            for ix, it in enumerate([(i, y) for i, y in zip(*indices)]):
                if it not in indices_folded:
                    indices_folded.append(it)
                    stamp_folded.append(stamp_flat[ix])
                else:
                    w = indices_folded.index(it)
                    stamp_folded[w] += stamp_flat[ix]
            indices = tuple(zip(*indices_folded))
            stamp = np.array(stamp_folded)
        return indices, stamp
Example #9
 def gradEval(self, x, data1):
     #gradient is calculated in a vectorized way
     '''by calling the stack fn we will create a giant matrix of variables that
     mirrors the data. Then, we will apply the gradient operation, i.e. 2*(w-data).
     Next, we will call grad_pooling to add the relevant components; here
     stacking is done horizontally (784 components are stacked horizontally, to be exact)'''
     self.dat_temp = data1
     m = float(sp.shape(x)[1])  #no of columns
     self.gd = sp.ones(m)
     self.x_temp = x
     map(self.stack, data1[1], ['gd' for i in range(len(data1[1]))])
     self.gd = sp.delete(
         self.gd, (0),
         axis=0)  #deleting the first row (of ones created above)
     self.grad_vec_l = 2 * (self.gd - data1[0])
     self.grad_vec = sp.ones((10, 1))
     iter_temp = sp.array([i for i in range(784)])
     map(self.grad_pooling, iter_temp)
     self.grad_vec = sp.delete(self.grad_vec, (0), axis=1)
     '''normalizing the gradient vector if necessary;
     len_vec is an array of length 10, i.e. the number of parameters'''
     len_vec = sp.sqrt(
         sp.diagonal(sp.dot(self.grad_vec, sp.transpose(self.grad_vec))))
     for i in range(len(len_vec)):
         if len_vec[i] > 1000:
             self.grad_vec[i, :] = m * self.grad_vec[i, :] / float(
                 len_vec[i])
     return self.grad_vec  #10X784 matrix
Example #10
def read_data(is_in_data_file):

    # READ DATA IF AVAILABLE
    if (is_in_data_file):
        with open('data/data', 'rb') as f:
            data = pickle.load(f)
            return data

    power_data = sp.delete(sp.genfromtxt("data/Power_history.csv", delimiter=","), 0, 1).flatten()

    weather_data = sp.stack([x.flatten() for x in sp.delete([sp.genfromtxt("data/" + filename, delimiter=",") for filename in filenames], [0, 1], 2)])
    weather_data = sp.delete(weather_data, sp.s_[:18], 1)

    # PREPROCESSING
    # REDUCE BROKEN DATA
    weather_data = sp.stack([x[~sp.isnan(x)] for x in weather_data])
    weather_data = sp.stack([x[~sp.isnan(power_data)] for x in weather_data])
    power_data = power_data[~sp.isnan(power_data)]
    data = sp.vstack([weather_data, power_data])
    data = data.transpose()
    # SELECT FILES TO BE INCLUDED IN COMPUTATION, POWER DATA ARE ALWAYS AT DATA[-1] !!!
    data = preproces(data, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    # WRITING OUTPUT DATA TO FILE 'DATA'
    with open('data/data', 'wb') as f:
        pickle.dump(data, f)
    return data
Example #11
    def gradEval(self, x, data1):
        '''gradient form is negative for the form shown in the image'''
        self.x_temp = x
        self.dat_temp = data1
        self.fn2 = None
        map(self.secondpart, [i for i in range(10)])
        gd2b = self.fn2  #DSX1

        #fn_stacked creates first parts w's
        self.gd_stacked = sp.ones(
            784)  #next two steps are req for calling stack fn
        map(self.stack, data1[1], ['gd_stacked' for i in range(len(data1[1]))])
        self.gd_stacked = sp.delete(
            self.gd_stacked, (0),
            axis=0)  #deleting the first row (of ones created above)

        gd2a1 = self.gd_stacked * self.dat_temp[0]
        gd2a = sp.exp(gd2a1.sum(axis=1, keepdims=True))  #DSX1

        temp = sp.divide(gd2a, gd2b) * self.dat_temp[0]

        self.grad_vec_l = -1 * self.dat_temp[0] + temp

        self.grad_vec = sp.ones((10, 1))
        iter_temp = sp.array([i for i in range(784)])
        map(self.grad_pooling, iter_temp)
        self.grad_vec = sp.delete(self.grad_vec, (0), axis=1)
        return self.grad_vec
Example #12
def GetUsefulTables(useful_headers):
    number_of_tables=len(useful_headers)
    list_of_arrays=[]
    constvalues=sci.zeros(number_of_tables)
    stats=sci.zeros(number_of_tables)
    for i in range(number_of_tables):
        useful_header=useful_headers[i]
        useful_table=useful_header.find_next("table")
        rows=useful_table.find_all("tr")
        number_of_cols=len(rows[-1].find_all("td"))
        if(number_of_cols<3):
            continue
        constvalues[i]=GetConstantValue(useful_header)
        stats[i]=GetTableStat(useful_header)
        arr=sci.zeros((len(rows),3))
        
        row_counter=0
        for row in rows:
            cols=row.find_all("td")
            for cell in range(len(cols)):
                arr[row_counter,cell]=cols[cell].get_text()
            row_counter+=1
        
        arr=sci.delete(arr,(0),axis=0)
        if(arr[0,1]==0.):
            arr=sci.delete(arr,(0),axis=0)
        if(arr[-1,1]==1.):
            arr=sci.delete(arr,(-1),axis=0)
        list_of_arrays.append(arr)
    
    final_constvalues=constvalues[constvalues!=0]
    final_stats=stats[constvalues!=0]
    return [list_of_arrays,final_constvalues,final_stats]
Example #13
def calc_coh(subject, conditions, task, meg_electordes_names, meg_electrodes_data, tmin=0, tmax=2.5, sfreq=1000, fmin=55, fmax=110, bw=15, n_jobs=6):
    input_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_data_trials.mat')
    output_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_coh.npy')
    d = sio.loadmat(input_file)
    # Remove and sort the electrodes according to the meg_electordes_names
    electrodes = get_electrodes_names(subject, task)
    electrodes_to_remove = set(electrodes) - set(meg_electordes_names)
    indices_to_remove = [electrodes.index(e) for e in electrodes_to_remove]
    electrodes = scipy.delete(electrodes, indices_to_remove).tolist()
    electrodes_indices = np.array([electrodes.index(e) for e in meg_electordes_names])
    electrodes = np.array(electrodes)[electrodes_indices].tolist()
    assert(np.all(electrodes==meg_electordes_names))

    for cond, data in enumerate([d[conditions[0]], d[conditions[1]]]):
        data = scipy.delete(data, indices_to_remove, 1)
        data = data[:, electrodes_indices, :]
        data = downsample_data(data)
        data = data[:, :, :meg_electrodes_data.shape[2]]
        if cond == 0:
            coh_mat = np.zeros((data.shape[1], data.shape[1], 2))

        con_cnd, _, _, _, _ = spectral_connectivity(
            data, method='coh', mode='multitaper', sfreq=sfreq,
            fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=n_jobs, mt_bandwidth=bw, mt_low_bias=True,
            tmin=tmin, tmax=tmax)
        con_cnd = np.mean(con_cnd, axis=2)
        coh_mat[:, :, cond] = con_cnd
    np.save(output_file[:-4], coh_mat)
    return con_cnd
Example #14
    def eigens(self, k):

        import scipy.sparse.linalg as linalg
        import scipy.cluster.vq as vq
        import scipy
        print self.graph.nodes()
        matrix = nx.normalized_laplacian_matrix(self.graph)
        eig_res = linalg.eigsh(matrix, len(self.graph.nodes())-1)[1]

        eig_res = scipy.delete(eig_res, scipy.s_[k+1:], 1)
        eig_res = scipy.delete(eig_res, 0, 1)

        norm = scipy.sqrt(eig_res*eig_res).sum(axis=1)
        eig_res = eig_res/norm.reshape(len(self.graph.nodes()),1)

        result = vq.kmeans2(eig_res, k, iter=100)[1]
        print result
        result = self.knap(k, result)
        print result
        print self.graph.nodes()
        for place in range(0, len(result)):
            app = self.graph.nodes()[place]
            self.servers[result[place]].add_app(app, self.graph.node[app]['size'])

        return result
Example #15
def MoveToAisle(t, aisle_q, pass_q, sum_time):
    if (t > sum_time[0]):
        if (aisle_q[0] == -1):
            aisle_q[0] = pass_q[0].copy()
            pass_q = sci.delete(pass_q, 0)
            sum_time = sci.delete(sum_time, 0)
    return aisle_q, pass_q, sum_time
Example #16
def marginalize(dist_vars,marg_vars):
    #Initialize marginal dict, same for all dists
    margdist_vars={}
    margdist_vars['dist']=dist_vars['dist']
    #Gaussian
    if dist_vars['dist']=='gaussian':
        N_k=len(dist_vars['w'])#Number of gaussians
        N_D=len(dist_vars['mu'][0])#Dim of original parameter space
        
        #Initialize remaining components of marg dict, before any marginalization        
        margdist_vars['mu']=dist_vars['mu'][:]
        margdist_vars['cov']=dist_vars['cov'][:]
        margdist_vars['w']=dist_vars['w'][:]
        margdist_vars['vars']=dist_vars['vars'][:]
        
        for marg_var in marg_vars:
            #Get indices of marginalized var in current gaussian
            i_m=margdist_vars['vars'].index(marg_var)
            #Create list of current indices
            i_old=list(range(N_D))
            #remove index of marg_var
            i_old.remove(i_m)
            
            
            #remove marg_var from list of vars
            margdist_vars['vars'].remove(marg_var)
        
            margdist_vars['mu']=[sp.delete(margdist_vars['mu'][i],i_m,0) for i in range(len(margdist_vars['w']))]
            
            #For testing
#            for i in range(N_k):
#                margdist_vars['w'][i]=dist_vars['w'][i]
#                margdist_vars['cov'][i]=sp.delete(sp.delete(margdist_vars['cov'][i],i_m,0),i_m,1)
            
            #Loop over components in mixture
            #marg cov:T_M=L_m-T_m
            #marg weight:w_m=sp.sqrt(2*pi/L_mm)
            for i in range(N_k):
                #invert original covariance matrix
                Lambda=inv(sp.matrix(margdist_vars['cov'][i]))
                #Store the marginalized component of Lambda
                L_mm=Lambda[i_m,i_m]
                #Remove marginal component from Lambda
                L_m=sp.delete(sp.delete(Lambda,i_m,0),i_m,1)
                #Construct skew matrix
                l_m=sp.matrix(Lambda[i_m,i_old]+Lambda[i_old,i_m])
                T_m=l_m.T*l_m/(4*L_mm)
                #Construct marginalized covariance matrix
                margdist_vars['cov'][i]=sp.asarray(inv(L_m-T_m))
                #Scale weight
                margdist_vars['w'][i]=sp.sqrt(2*sp.pi/L_mm)*dist_vars['w'][i]
            
            #Update dimensions of marginalized parameter space
            N_D=N_D-1
         
        return margdist_vars
Example #17
 def lab_reduce(y_true, y_score):
     empty_indices = scan_empty(y_true)
     i = 0
     for k in empty_indices:
         y_true = scipy.delete(y_true, k-i, 1)
         y_score = scipy.delete(y_score, k-i, 1)
         i += 1
     return y_true, y_score
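The k-i offset above compensates for column indices shifting after each single-column delete. Since scipy.delete (like numpy.delete) also accepts a list of indices, the same reduction can be done in one call per array; a sketch on toy data:

import numpy as np

y_true = np.array([[1, 0, 0, 1],
                   [0, 0, 1, 1]])
empty_indices = [1]  # e.g. columns flagged by a scan_empty-style check (hypothetical here)
reduced = np.delete(y_true, empty_indices, 1)
assert reduced.shape == (2, 3)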
Example #18
 def lab_reduce(y_true, y_score):
     empty_indices = scan_empty(y_true)
     i = 0
     for k in empty_indices:
         y_true = scipy.delete(y_true, k-i, 1)
         y_score = scipy.delete(y_score, k-i, 1)
         i += 1
     return y_true, y_score
Example #19
def load_structural(fname):
    data = np.genfromtxt(fname, delimiter=',')
    # removing first column and first row, because they're headers
    data = scipy.delete(data, 0, 1)
    data = scipy.delete(data, 0, 0)
    # format it to be subjects x variables
    data = data.T
    return data
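A self-contained sketch of the same header-stripping pattern, using an in-memory CSV instead of a file; np.genfromtxt turns the header cells into nan, and the two deletes strip them:

import io
import numpy as np

csv_text = u'id,v1,v2\ns1,1.0,2.0\ns2,3.0,4.0'
data = np.genfromtxt(io.StringIO(csv_text), delimiter=',')
data = np.delete(data, 0, 1)  # drop the header column
data = np.delete(data, 0, 0)  # drop the header row
assert data.shape == (2, 2)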
Example #20
def main():
    '''
    Breast Cancer data set
    '''
    # Get the breast cancer data
    cancer_data = np.loadtxt("breast-cancer-wisconsin.data", delimiter=',', dtype=str)
    # All the missing values are substituted with 0.0
    cancer_data[cancer_data == "?"] = 0.0
    # Extract the cancer ids from the given input
    cancer_id = cancer_data[:, :1]
    # Extract the features from the given input
    input_matrix = cancer_data[:, 1:-1]
    # Extract the output labels
    labels = cancer_data[:, -1]
    # Instantiation of Logistic Regression
    # Regularization to avoid overfitting
    logistic_classifier = LogisticRegression(C=0.5, max_iter = 900)
    # Splitting the data into training and testing
    # Could have split into training, test and cross-validation to avoid overfitting.
    train_set, test_set, train_class_label, test_class_label = train_test_split(input_matrix, labels, train_size = 0.5, test_size=0.5, random_state=10)
    # To ease, all the values are converted to float
    train_set=np.array(train_set,dtype=float)
    test_set=np.array(test_set,dtype=float)
    train_class_label=np.array(train_class_label,dtype=float)
    test_class_label=np.array(test_class_label,dtype=float)
    '''Train a machine learning model with the given training set'''
    logistic_classifier.fit(train_set, train_class_label)
    '''
    Titanic Data set
    '''
    titanic_data = np.loadtxt("train.csv", delimiter=',', dtype=str)
    titanic_data[titanic_data == "?"] = 0.0
    titanic_data[titanic_data == ""] = 0.0
    labels = titanic_data[1:, 1]
    # To ease, all the string columns are removed so that the logistic regression model can be built easily
    # Columns removed are : Passenger Id, Name, Pclass, Embarkment, Sex, Cabin
    # Traveller info contains the information of the passenger's name, id and sex
    titanic_data = titanic_data[1:, 2:-1]
    titanic_data = scipy.delete(titanic_data, [1,2,3,7,9], 1)
    titanic_data=np.array(titanic_data,dtype=float)
    titanic_logistic_classifier = LogisticRegression(C=0.5, max_iter = 900)
    titanic_logistic_classifier.fit(titanic_data, labels)
    
    # Test set of titanic data set
    titanic_test_set = np.loadtxt("test.csv", delimiter=',', dtype=str)
    titanic_test_set[titanic_test_set == "?"] = 0.0
    titanic_test_set[titanic_test_set == ""] = 0.0
    
    # Slice the features from the input
    # To ease, all the string columns are removed so that the logistic regression model can be built easily
    # Columns removed are : Passenger Id, Name, Pclass, Embarkment, Sex, Cabin
    # Traveller info contains the information of the passenger's name, id and sex
    traveller_info = titanic_test_set[1:, :5]
    titanic_test_set = titanic_test_set[1:, 1:]
    titanic_test_set = scipy.delete(titanic_test_set, [1,2,3,7,9], 1)
    titanic_test_set=np.array(titanic_test_set,dtype=float)
    # Calling the function correlate date
    correlate_data_sets(test_set, logistic_classifier, titanic_test_set, titanic_logistic_classifier, traveller_info, cancer_id)
Example #21
    def __init__(self, opts):
        self.train_file = opts["train_file"]
        self.test_file = opts["test_file"]
        self.out_file = opts["out_file"]

        self.learning_rate = opts["learning_rate"]
        self.decay_rate = opts["decay_rate"]
        self.batch_size = opts["batch_size"]
        self.n_iter = opts["n_iter"]
        self.shuffle = opts["shuffle"]
        self.holdout_size = opts["holdout_size"]
        self.l2 = opts["l2"]
        self.standardization = opts["standardize"]
        self.loss_method = opts["loss"]
        self.use_adagrad = opts["adagrad"]
        self.use_rmsprop = opts["rmsprop"]
        self.hash_trick_mod = opts["hash"]

        print opts

        train_data = read_data(self.train_file)
        test_data = read_data(self.test_file)

        self.test_input = np.ones((test_data.shape[0], test_data.shape[1] + 1), dtype=np.float)
        self.test_input[:, 1:] = test_data[:, :]
        self.test_initial = np.array(self.test_input)
        self.test_output = np.zeros(test_data.shape[0])

        self.input = np.ones(train_data.shape, dtype=np.float)
        self.input[:, 1:] = train_data[:, :-1]
        self.output = train_data[:, -1:].transpose(1, 0)[0]

        self.validation_input = np.array([])
        self.validation_output = np.array([])
        if self.holdout_size:
            holdout_part = int(self.holdout_size * self.input.shape[0])
            random_rows = random.sample(range(self.input.shape[0]), holdout_part)
            self.validation_input = self.input[random_rows, :]
            self.validation_output = self.output[random_rows]
            self.input = scipy.delete(self.input, random_rows, 0)
            self.output = scipy.delete(self.output, random_rows)

        self.learning_input = np.array(self.input)
        self.learning_output = np.array(self.output)

        if self.hash_trick_mod != 0:
            self.learning_input = hash_trick(self.learning_input, self.hash_trick_mod)
            self.validation_input = hash_trick(self.validation_input, self.hash_trick_mod)
            self.test_input = hash_trick(self.test_input, self.hash_trick_mod)

        if self.standardization:
            standardize(self.learning_input)
            standardize(self.validation_input)
            standardize(self.test_input)

        self.w = np.zeros(self.learning_input.shape[1], dtype=np.float)
        self.adagrad_cache = np.zeros(len(self.w))
        self.rmsprop_cache = np.zeros(len(self.w))
Example #22
 def removeFactors(self, idx, axis=0):
     super().removeFactors(idx, axis)
     self.p_cov_inv = s.delete(self.p_cov_inv, axis=0, obj=idx)
     self.p_cov_inv_diag = s.delete(self.p_cov_inv_diag, axis=0, obj=idx)
     self.K = self.dim[1]
     if self.length_scales is not None:
         self.length_scales = s.delete(self.length_scales, obj=idx)
     if self.struct is not None:
         self.struct = s.delete(self.struct, obj=idx)
Example #23
    def condenseMatrix(self,H):
        
        # applyBoundaryConditions on Hx Hy Hz
        H = np.delete(H,0,0)
        H = np.delete(H,-1,0)
        H = np.delete(H,0,1)
        H = np.delete(H,-1,1)

        return H
Example #24
 def delete_invalid_data(self, value=0.0):
     r"""
     .. todo:: The explicit dependency on cathode current needs to be removed
     """
     rows = sp.where(self._data[self._objectives[0]] == value)
     self._logger.warning('Deleting invalid data rows: ' + str(rows))
     self._data = sp.delete(self._data, rows, axis=0)
     for key in self._datadict.keys():
         self._datadict[key] = sp.delete(self._datadict[key], rows, axis=0)
Example #25
 def delete_invalid_data(self,value=0.0):
     r"""
     .. todo:: The explicit dependency on cathode current needs to be removed
     """
     rows=sp.where(self._data[self._objectives[0]]==value)
     self._logger.warning('Deleting invalid data rows: '+str(rows))
     self._data=sp.delete(self._data,rows,axis=0)
     for key in self._datadict.keys():
         self._datadict[key] = sp.delete(self._datadict[key],rows,axis=0)
Example #26
    def removeDimensions(self, axis, idx):
        # Method to remove undesired dimensions
        # - axis (int): axis from where to remove the elements
        # - idx (numpy array): indices of the elements to remove
        assert axis <= len(self.dim)
        assert s.all(idx < self.dim[axis])

        self.params["alpha"] = s.delete(self.params["alpha"],
                                        axis=axis,
                                        obj=idx)
        self.expectations["E"] = s.delete(self.expectations["E"],
                                          axis=axis,
                                          obj=idx)
        self.expectations["E2"] = s.delete(self.expectations["E2"],
                                           axis=axis,
                                           obj=idx)

        if self.axis_cov == 1:  #K has shape (N,D,D) for mean of shape (N,D)
            if axis == 0:
                self.params["K"] = s.delete(self.params["K"], axis=0, obj=idx)
            else:
                self.params["K"] = s.delete(self.params["K"], axis=1, obj=idx)
                self.params["K"] = s.delete(self.params["K"], axis=2, obj=idx)

        else:  #K has shape (D,N,N) for mean of shape (N,D)
            if axis == 0:
                self.params["K"] = s.delete(self.params["K"], axis=1, obj=idx)
                self.params["k"] = s.delete(self.params["K"], axis=2, obj=idx)
            else:
                self.params["K"] = s.delete(self.params["K"], axis=0, obj=idx)
        self.updateDim(axis=axis, new_dim=self.dim[axis] - len(idx))
Example #27
def rankUsingPCA(fileName):
        fp = open(fileName)
        line = fp.readline()
        firstLine = line.strip().split(',')
        fp.close()
        #names = numpy.array(firstLine[1:-1])
        names = numpy.array(firstLine[1:])

        #print names.shape
        print names
        dataMat = loadDataSet(fileName)
        #print dataMat
        meanVals = mean(dataMat, axis=0)
        meanRemoved = dataMat - meanVals
        covMat = cov(meanRemoved, rowvar=0)
        eigVals,eigVects = linalg.eig(mat(covMat))
        eigValInd = argsort(eigVals)
        eigValInd = eigValInd[:-(999999+1):-1]
        redEigVects = eigVects[:,eigValInd]
        #lowDMat, reconMat = pca(dataMat)
        lowDDataMat = meanRemoved * redEigVects
        T = redEigVects.getA()
        print T
        # calculate the variance covered by each components in PCA
        percentagePCA = calculateFractionOfVarianceExplainedByPCA(lowDDataMat)
        for d in range(T.shape[0]):
                T[:,d] = T[:,d] * percentagePCA[d]

        #print T
        rankMatrix = {}
        rank = 0
        while(T.shape[0] > 1 and T.shape[1] > 1):
                rowMax = -99999
                index = 0
                maxIndex = -1
                for r in T:
                  valMax = numpy.amax(r)
                  if (valMax > rowMax):
                    rowMax = valMax
                    maxIndex = index
                  #endif
                  index = index + 1
                #endfor
                print names[maxIndex]
                rankMatrix[names[maxIndex]] = rank
                rank = rank + 1
                T = scipy.delete(T,maxIndex,0)
                #print T
                names = scipy.delete(names,maxIndex,0)
                #print names
        #end while
        print names[0]
        rankMatrix[names[0]] = rank
        return rankMatrix
Example #28
def betaExperiment(coefs, exps, splitSize, n_neighbour):
    Dimnsion = 9
    featureDim = Dimnsion - 1
    Density = 200
    predictions = []
    myMetricAccuracy_test = []
    myMetricAccuracy_train = []

    path = "./abalone.txt"

    for c in coefs:
        for e in exps:

            Labels = [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
            ]
            myParser = Paeser(similarDataFilePath=path,
                              splitSize=splitSize,
                              Flag=1,
                              Dimension=Dimnsion,
                              Labels=Labels)
            Data = myParser.DataGen(Density)
            beta = c * (len(Data[1][0])**e)
            mLearner = Learner(beta, Data[0][0], Data[0][1], featureDim - 1)
            lambdaVec = mLearner.computeLambda(len(Data[0][1]))
            M = mLearner.compute_M(np.ravel(lambdaVec))
            mLearner.Metric = M
            mKNN = KNNClassifier(M, n_neighbour)

            if not mLearner.is_pos_def(M):
                M = mKNN._getAplus(M)
                mKNN.setM(M)
            #print(M)
            X_train = Data[1][0]
            X_train = np.reshape(X_train, (len(X_train), 8))
            y_train = X_train[:, 7]
            X_train = scipy.delete(X_train, 7, 1)
            X_test = Data[1][1]
            X_test = np.reshape(X_test, (len(X_test), 8))
            y_test = X_test[:, 7]
            X_test = scipy.delete(X_test, 7, 1)
            predictions = []
            mKNN.fit(X_train, y_train)
            mKNN.setM(M)
            predictions = mKNN.predict(X_test)
            myMetricAccuracy_test.append(
                [mLearner.getAccuracy(y_test, predictions), [c, e]])
            predictions = mKNN.predict(X_train)
            myMetricAccuracy_train.append(
                mLearner.getAccuracy(y_train, predictions))
            print("great")
    return sorted(myMetricAccuracy_test,
                  key=getKey), sorted(myMetricAccuracy_train)
Example #29
    def forward_selection_JM(self, x, y, delta=0.1, maxvar=None, ncpus=None):
        '''
        '''
        ## Get some information from the variable
        C = int(y.max(0))  # Number of classes
        n = x.shape[0]  # Number of samples
        d = x.shape[1]  # Number of variables
        if ncpus is None:
            ncpus = mp.cpu_count()  # Get the number of core

        ## Initialization
        r = 0  # Initialization of the counter
        variable = sp.arange(d)  # At step zero: d variables available
        ids = []  # and no selected variable
        JMD = []  # list of the evolution the OA estimation
        if maxvar is None:
            maxvar = sp.floor(d / 5)  # Select at most 20% of the original number of variables

        while (r < maxvar):
            JMd = sp.zeros(variable.size)
            pool = mp.Pool(processes=ncpus)
            processes = [
                pool.apply_async(compute_JFD, args=(v, self, x, y, ids, ind))
                for ind, v in enumerate(variable)
            ]
            pool.close()
            pool.join()
            for p in processes:
                ind, jm = p.get()
                JMd[ind] = jm

            ## Select the variable that provides the highest JM distance
            t = sp.argmax(JMd)  # get the index of the maximum JM distance
            JMD.append(JMd[t])  # add the value to the list
            if r == 0:
                ids.append(variable[t])  # add the selected variable to the pool
                variable = sp.delete(variable, t)  # remove the selected variable from the initial set
            elif (variable.size == 0) or ((
                (JMD[r] - JMD[r - 1]) / JMD[r - 1] * 100) < delta):
                JMD.pop()
                break
            else:
                ids.append(variable[t])
                variable = sp.delete(variable, t)
            r = r + 1

        ## Return the final value
        return ids, JMD
Example #30
def remove_from_hierarchy(obj, remove_half_orphans=True):
    """ Removes a Neo object from the hierarchy it is embedded in. Mostly
    downward links are removed (except for possible links in
    :class:`neo.core.Spike` or :class:`neo.core.SpikeTrain` objects).
    For example, when ``obj`` is a :class:`neo.core.Segment`, the link from
    its parent :class:`neo.core.Block` will be severed. Also, all links to
    the segment from its spikes and spike trains will be severed.

    :param obj: The object to be removed.
    :type obj: Neo object
    :param bool remove_half_orphans: When True, :class:`neo.core.Spike`
        and :class:`neo.core.SpikeTrain` belonging to a
        :class:`neo.core.Segment` or :class:`neo.core.Unit` removed by
        this function will be removed from the hierarchy as well, even
        if they are still linked from a :class:`neo.core.Unit` or
        :class:`neo.core.Segment`, respectively. In this case, their
        links to the hierarchy defined by ``obj`` will be kept intact.
    """
    classname = type(obj).__name__

    # Parent for arbitrary object
    if classname in neo.description.many_to_one_relationship:
        for n in neo.description.many_to_one_relationship[classname]:
            p = getattr(obj, n.lower())
            if p is None:
                continue
            l = getattr(p, classname.lower() + 's', ())
            try:
                l.remove(obj)
            except ValueError:
                pass

    # Many-to-many relationships
    if isinstance(obj, neo.RecordingChannel):
        for rcg in obj.recordingchannelgroups:
            try:
                idx = rcg.recordingchannels.index(obj)
                if rcg.channel_indexes.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_indexes = sp.delete(rcg.channel_indexes, idx)
                if rcg.channel_names.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_names = sp.delete(rcg.channel_names, idx)
                rcg.recordingchannels.remove(obj)
            except ValueError:
                pass

    if isinstance(obj, neo.RecordingChannelGroup):
        for rc in obj.recordingchannels:
            try:
                rc.recordingchannelgroups.remove(obj)
            except ValueError:
                pass

    _handle_orphans(obj, remove_half_orphans)
Example #31
 def get_Seed(self, col):
     # Seeds for hedging basket, based on alternating OLS weights
     a = np.zeros(shape=(self.covar.shape[0], self.covar.shape[1]))
     a[:] = self.covar[:]
     a = sp.delete(a, col, 0)
     a = sp.delete(a, col, 1)
     a = np.linalg.inv(a)
     b = self.covar[:, col]
     b = sp.delete(b, col, 0)
     c = a * b
     c = np.insert(c, col, -1, axis=0)
     return -c
Example #32
def remove_from_hierarchy(obj, remove_half_orphans=True):
    """ Removes a Neo object from the hierarchy it is embedded in. Mostly
    downward links are removed (except for possible links in
    :class:`neo.core.Spike` or :class:`neo.core.SpikeTrain` objects).
    For example, when ``obj`` is a :class:`neo.core.Segment`, the link from
    its parent :class:`neo.core.Block` will be severed. Also, all links to
    the segment from its spikes and spike trains will be severed.

    :param obj: The object to be removed.
    :type obj: Neo object
    :param bool remove_half_orphans: When True, :class:`neo.core.Spike`
        and :class:`neo.core.SpikeTrain` belonging to a
        :class:`neo.core.Segment` or :class:`neo.core.Unit` removed by
        this function will be removed from the hierarchy as well, even
        if they are still linked from a :class:`neo.core.Unit` or
        :class:`neo.core.Segment`, respectively. In this case, their
        links to the hierarchy defined by ``obj`` will be kept intact.
    """
    classname = type(obj).__name__

    # Parent for arbitrary object
    if classname in neo.description.many_to_one_relationship:
        for n in neo.description.many_to_one_relationship[classname]:
            p = getattr(obj, n.lower())
            if p is None:
                continue
            l = getattr(p, classname.lower() + 's', ())
            try:
                l.remove(obj)
            except ValueError:
                pass

    # Many-to-many relationships
    if isinstance(obj, neo.RecordingChannel):
        for rcg in obj.recordingchannelgroups:
            try:
                idx = rcg.recordingchannels.index(obj)
                if rcg.channel_indexes.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_indexes = sp.delete(rcg.channel_indexes, idx)
                if rcg.channel_names.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_names = sp.delete(rcg.channel_names, idx)
                rcg.recordingchannels.remove(obj)
            except ValueError:
                pass

    if isinstance(obj, neo.RecordingChannelGroup):
        for rc in obj.recordingchannels:
            try:
                rc.recordingchannelgroups.remove(obj)
            except ValueError:
                pass

    _handle_orphans(obj, remove_half_orphans)
Example #33
def redheffer_matrix(n):
    a = np.zeros((n + 1, n + 1))
    for i in xrange(1, n + 1):
        a[i, 1] = 1
        for j in xrange(1, n + 1):
            if j % i == 0:
                #print i,j
                a[i, j] = 1

    a = np.delete(a, (0), axis=0)
    a = np.delete(a, (0), axis=1)
    return a
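A quick sanity check for the function above: the determinant of the n x n Redheffer matrix equals the Mertens function M(n). A sketch, assuming redheffer_matrix is in scope:

import numpy as np

a = redheffer_matrix(5)
assert a.shape == (5, 5)
# M(5) = mu(1) + mu(2) + mu(3) + mu(4) + mu(5) = 1 - 1 - 1 + 0 - 1 = -2
assert int(round(np.linalg.det(a))) == -2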
Example #34
 def removeFactors(self, idx, axis=1):
     """
     Method to remove factors
     """
     if self.Kg is not None:
         self.Kg.removeFactors(idx)
     if self.Kc is not None:
         self.Kc.removeFactors(idx)
     self.zeta = s.delete(self.zeta, axis=0, obj=idx)
     self.updateDim(0, self.dim[0] - len(idx))
     self.K = self.K - 1
     self.Sigma = s.delete(self.Sigma, axis=0, obj=idx)
     self.Sigma_inv = s.delete(self.Sigma_inv, axis=0, obj=idx)
     self.Sigma_inv_logdet = s.delete(self.Sigma_inv_logdet, axis=0, obj=idx)
Example #35
def rem_borders(img, u, d, l, r):
    for i in range(u):
        img = sc.delete(img, 0, 0)
    img = np.flipud(img)
    for i in range(d):
        img = sc.delete(img, 0, 0)
    img = np.flipud(img)
    for i in range(l):
        img = sc.delete(img, 0, 1)
    img = np.fliplr(img)
    for i in range(r):
        img = sc.delete(img, 0, 1)
    img = np.fliplr(img)
    return img
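The four deletion loops above amount to a single slice when the border widths are smaller than the image; a minimal sketch of the equivalence:

import numpy as np

img = np.arange(36).reshape(6, 6)
u, d, l, r = 1, 2, 1, 1
trimmed = img[u:img.shape[0] - d, l:img.shape[1] - r]
assert trimmed.shape == (3, 4)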
Example #36
def crop(cropped, originalx, originaly):
    newx, newy, z = cropped.shape
    bx = (newx - originalx) / 2
    by = (newy - originaly) / 2

    removex = list(xrange(bx))
    cropped = delete(cropped, removex, 0)
    removey = list(xrange(by))
    cropped = delete(cropped, removey, 1)
    removex = list(xrange(newx - 2 * bx, newx - bx))
    removey = list(xrange(newy - 2 * by, newy - by))
    cropped = delete(cropped, removex, 0)
    cropped = delete(cropped, removey, 1)
    return cropped
Example #37
def rem_borders (img, u, d, l, r):
    for i in range(u):
        img = sc.delete(img, 0, 0)
    img = np.flipud(img)
    for i in range(d):
        img = sc.delete(img, 0, 0)
    img = np.flipud(img)
    for i in range(l):
        img = sc.delete(img, 0, 1)
    img = np.fliplr(img)
    for i in range(r):
        img = sc.delete(img, 0, 1)
    img = np.fliplr(img)
    return img
Example #38
def test_tfm2 (n, addnoise=False):
    
    Tms = make_obs()
    Tsm = np.linalg.inv(Tms)
    I = np.eye(4)
    
    M_final = np.empty((0,16))
    
    for i in range(n):
        del_Ts = make_obs(0.05, 0.05)
        if addnoise:
            noise = make_obs(0.01,0.01)
            del_Tm = Tms.dot(del_Ts.dot(Tsm)).dot(noise)
        else:
            del_Tm = Tms.dot(del_Ts.dot(Tsm))
        print "Observation %i"%(i+1)
        print "Delta Ts:"
        print del_Ts
        print "Delta Tm:"
        print del_Tm, '\n'
        
        M = np.kron(I,del_Tm)-np.kron(del_Ts.T,I)
        
        M_final = np.r_[M_final, M]
    
    
    L_final = -1*M_final[:,15]
    M_final = scp.delete(M_final, (3,7,11,15), 1) 

            
    X = np.linalg.lstsq(M_final,L_final)[0]
    Tfm = np.reshape(X,(3,4),order='F')
    print Tfm.shape
    Tfm = np.r_[Tfm,np.array([[0,0,0,1]])]

    np.set_printoptions(precision=5)    
    
    print Tfm
    print Tms
    
    R = Tfm[0:3,0:3]
    print R.T.dot(R)
    
    X2 = scp.delete(np.reshape(Tms,16,order="F"),(3,7,11,15),0)
    print X2
    
    if not addnoise:
        assert(np.allclose(M_final.dot(X2),L_final, atol=0.001))
        assert (np.allclose(Tfm,Tms, atol=0.001))
Example #39
 def bundle(self, cov, indices):
     # Bundle the covariance matrix
     diag, a = 0, 0
     # Add clustering column and row
     for i in indices:
         a += cov[:, i]
     # Compute diagonal element
     for i in indices:
         diag += a[i]
     cov = self.expandCov(cov, a, diag)
     # Remove clustered columns and rows
     for i in range(len(indices)):
         cov = delete(cov, indices[i] - i, 0)
         cov = delete(cov, indices[i] - i, 1)
     return cov
Example #40
    def removeDimensions(self, axis, idx):
        """ General method to remove undesired dimensions

        PARAMETERS
        ----------
        axis: int
            axis from where to remove the elements
        idx: list or numpy array
            indices of the elements to remove
        """
        assert axis <= len(self.dim)
        assert s.all(idx < self.dim[axis])
        for k in self.params.keys(): self.params[k] = s.delete(self.params[k], idx, axis)
        for k in self.expectations.keys(): self.expectations[k] = s.delete(self.expectations[k], idx, axis)
        self.updateDim(axis=axis, new_dim=self.dim[axis]-len(idx))
Example #41
def solve_sylvester2 (tfms1, tfms2):
    """
    Solves the system of Sylvester's equations to find the calibration transform.
    Returns the calibration transform from sensor 1 (corresponding to tfms1) to sensor 2.
    This function forces the bottom row to be 0,0,0,1 by neglecting columns of M and changing L.
    """

    assert len(tfms1) == len(tfms2) and len(tfms1) >= 2
    I = np.eye(4)
    I_0 = np.copy(I)
    I_0[3,3] = 0
        
    M_final = np.empty((0,16))

    s1_t0_inv = np.linalg.inv(tfms1[0])
    s2_t0_inv = np.linalg.inv(tfms2[0])
    
    print "\n CONSTRUCTING M: \n"
    
    for i in range(1,len(tfms1)):
        del1 = np.linalg.inv(tfms1[i]).dot(tfms1[0])
        del2 = np.linalg.inv(tfms2[i]).dot(tfms2[0])

        print "\n del1:"
        print del1
        print del1.dot(I_0).dot(del1.T)
        print "\n del2:"
        print del2, '\n'
        print del2.dot(I_0).dot(del2.T)
        
        M = np.kron(I, del1) - np.kron(del2.T,I)
        M_final = np.r_[M_final, M]
    
    L_final = -1*np.copy(M_final[:,15])
    M_final = scp.delete(M_final, (3,7,11,15), 1) 

    X = np.linalg.lstsq(M_final,L_final)[0]
    print M_final.dot(X) - L_final
    
    X2 = (np.reshape(scp.delete(np.eye(4),3,0),12,order="F"))
    print M_final.dot(X2) - L_final
    
    tt = np.reshape(X,(3,4),order='F')
    tt = np.r_[tt,np.array([[0,0,0,1]])]
    
    print tt.T.dot(tt)
    
    return tt
Example #42
def generate_dataset(x0, y0, z0, N, t_delta, cases):
    """
    Generate full dataset for all the sigma, beta, rho parameters
    considered in the 'cases' list. Each member of list is a tuple
    of fixed sigma, beta and rho values of the attractor.

    Inputs:
    x0: Integer for the initial condition for the x-position
    y0: Float64 for the initial condition for the y-position
    z0: Integer for the initial condition for the z-position
    N: Integer for the total number of steps of the solver
    t_delta: Float64 for the step size of the solver
    cases: List of tuple, where each tuple is of the type (sigma, beta, rho)
    """
    dataset = sp.zeros([1, 8])  # Just to create the first concatenate

    for sigma, beta, rho in cases:
        x, y, z = sv.compute_states(x0, y0, z0, sigma, beta, rho, N, t_delta)

        # Give data an array format
        data = generate_data_case(sigma, beta, rho, N, t_delta, x, y, z)
        dataset = sp.concatenate((dataset, data), axis=0)

    dataset = sp.delete(dataset, (0), axis=0)  # Remove the first row of zeros

    return dataset
Example #43
 def nms(boxes, T=0.5):
     if len(boxes) == 0:
         return []
     boxes = boxes.astype("float")
     pick = []
     x1 = boxes[:, 0]
     y1 = boxes[:, 1]
     x2 = boxes[:, 2]
     y2 = boxes[:, 3]
     area = (x2 - x1 + 1) * (y2 - y1 + 1)
     idxs = sp.argsort(y2)
     while len(idxs) > 0:
         last = len(idxs) - 1
         i = idxs[last]
         pick.append(i)
         xx1 = sp.maximum(x1[i], x1[idxs[:last]])
         yy1 = sp.maximum(y1[i], y1[idxs[:last]])
         xx2 = sp.minimum(x2[i], x2[idxs[:last]])
         yy2 = sp.minimum(y2[i], y2[idxs[:last]])
         w = sp.maximum(0, xx2 - xx1 + 1)
         h = sp.maximum(0, yy2 - yy1 + 1)
         I = w * h
         #overlap_ratio = I / area[idxs[:last]]
         overlap_ratio = I / (area[i] + area[idxs[:last]] - I)
         idxs = sp.delete(
             idxs, sp.concatenate(([last], sp.where(overlap_ratio > T)[0])))
     return boxes[pick].astype("int")
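A toy run of the non-maximum suppression above, assuming the function is in scope and an older scipy whose numpy aliases (sp.argsort, sp.maximum, sp.delete, ...) exist:

import numpy as np

boxes = np.array([[10, 10, 50, 50],
                  [12, 12, 52, 52],      # heavy overlap with the first box
                  [100, 100, 140, 140]])  # far away from both
kept = nms(boxes, T=0.5)
# the two overlapping boxes collapse into one pick; the distant box survives
assert len(kept) == 2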
Example #44
def cutMatrix(matrix,columnlist):
	columnlist = sorted(columnlist, reverse=True)
	newMatrix = matrix
	print columnlist
	for col in columnlist:
		newMatrix = scipy.delete(newMatrix, col, 1)
	return newMatrix
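cutMatrix sorts the column list in descending order so that earlier deletions do not shift the indices of later ones. The same result can be had by passing all indices to a single call; a sketch of the equivalence:

import numpy as np

m = np.arange(12).reshape(3, 4)
# high-to-low deletion keeps the remaining indices valid...
out = np.delete(np.delete(m, 3, 1), 1, 1)
# ...and matches deleting both columns at once
assert np.array_equal(out, np.delete(m, [1, 3], 1))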
Example #45
    def get_usgs_n(self):
        if self.get_usgsrc() == 0:
            return
        self.get_values()  # Fetch usgsq, usgsh, handq, handh, handarea, handrad, handslope, handstage

        # Find indices for integer stageheight values in usgsh, and apply to usgsq
        usgsidx = scipy.where(scipy.equal(scipy.mod(
            self.usgsh, 1), 0))  # Find indices of integer values in usgsh
        usgsh = self.usgsh[usgsidx]
        usgsq = self.usgsq[usgsidx]

        # Find indices where usgsh[usgsidx] occur in handstage, and apply to handarea and handrad
        handidx = scipy.where(scipy.in1d(self.handstage, usgsh))
        area = self.handarea[handidx]
        hydrad = self.handrad[handidx]

        # Remove usgsq values for duplicate usgsh heights (keep first instance only)
        if usgsh.shape != area.shape:
            for i in range(usgsh.shape[0]):
                if i == 0: pass
                elif usgsh[i] == usgsh[i - 1]:
                    usgsq = scipy.delete(usgsq, i)

        # Calculate average manning's n after converting discharge units
        disch = usgsq  #*0.0283168 # Convert cfs to cms
        self.usgsroughness_array = self.mannings_n(area=area,
                                                   hydrad=hydrad,
                                                   slope=self.handslope,
                                                   disch=disch)
        self.usgsroughness = scipy.average(self.usgsroughness_array)
        print 'Average roughness: {0:.2f}'.format(self.usgsroughness)
Example #46
def solve_sylvester3 (tfms1, tfms2):
    """
    Solves the system of Sylvester's equations to find the calibration transform.
    Returns the calibration transform from sensor 1 (corresponding to tfms1) to sensor 2.
    This function forces the bottom row to be 0,0,0,1 by neglecting columns of M and changing L.
    Delta transform from previous iteration.
    """

    assert len(tfms1) == len(tfms2) and len(tfms1) >= 2
    I = np.eye(4)
        
    M_final = np.empty((0,16))
    
    print "\n CONSTRUCTING M: \n"
    
    for i in range(1,len(tfms1)):
        s1_inv = np.linalg.inv(tfms1[i-1])
        s2_inv = np.linalg.inv(tfms2[i-1])
        M = np.kron(I, s1_inv.dot(tfms1[i])) - np.kron(s2_inv.dot(tfms2[i]).T,I)
        M_final = np.r_[M_final, M]
    
    L_final = -1*np.copy(M_final)[:,15]
    M_final = scp.delete(M_final, (3,7,11,15), 1) 

    X = np.linalg.lstsq(M_final,L_final)[0]
    print M_final.dot(X) - L_final
    
    tt = np.reshape(X,(3,4),order='F')
    tt = np.r_[tt,np.array([[0,0,0,1]])]
    
    print tt.T.dot(tt)
    
    return tt
Example #47
def predict(X, regressor, lon_ind=0, lat_ind=1, n_splits=3):

    # obtain training and testing indices
    kf = LongFold(n_splits=n_splits)
    train_indices = []
    test_indices = []
    for train_index, test_index in kf.split(X):
        train_indices.append(train_index)
        test_indices.append(test_index)

    X = scipy.delete(X, [lon_ind, lat_ind], 1)

    # run machine learning on the features to obtain the predicted log thickness
    y_pred_whole = np.empty(X.shape[0])

    # get training and testing indices then do machine learning
    for i in range(len(test_indices)):
        test_index = test_indices[i].tolist()

        X_test = X[test_index, :]

        # predict
        y_pred = regressor.predict(X_test)

        y_pred_whole[test_index] = y_pred

    return y_pred_whole
Example #48
def bipolarize_data(data, labels):
    bipolar_electrodes = []
    if isinstance(data, dict):
        single_trials = True
        bipolar_data = {}
        for key in data.keys():
            bipolar_data[key] = np.zeros(data[key].shape)
    else:
        single_trials = False
        bipolar_electrodes_num = calc_bipolar_electrodes_number(labels)
        bipolar_data = np.zeros((bipolar_electrodes_num, data.shape[1], data.shape[2]))
    bipolar_data_index = 0
    for index in range(len(labels) - 1):
        elc1_name = labels[index].strip()
        elc2_name = labels[index + 1].strip()
        elc_group1, _ = utils.elec_group_number(elc1_name)
        elc_group2, _ = utils.elec_group_number(elc2_name)
        if elc_group1 == elc_group2:
            elec_name = '{}-{}'.format(elc2_name, elc1_name)
            bipolar_electrodes.append(elec_name)
            if single_trials:
                for key in data.keys():
                    bipolar_data[key][:, bipolar_data_index, :] = (data[key][:, index, :] + data[key][:, index + 1, :]) / 2.
            else:
                bipolar_data[bipolar_data_index, :, :] = (data[index, :, :] + data[index + 1, :, :]) / 2.
            bipolar_data_index += 1
    if single_trials:
        for key in data.keys():
            bipolar_data[key] = scipy.delete(bipolar_data[key], range(bipolar_data_index, len(labels)), 1)
    return bipolar_data, bipolar_electrodes
Example #49
def delcomun(X,total):
    print "A apagar palavras mais comuns"
    import scipy as sc
    import operator
    cut=[]
    count={}
    for palavra in total:
        count[palavra]=sum(X[:,total[palavra]])
    
    for i in xrange(25):
        maxi=max(count.iteritems(), key=operator.itemgetter(1))[0]
                
        
        x=0
        for cutted in cut:
            if total[maxi]>cutted:
                x+=1
       
        X=sc.delete(X,total[maxi]-x,1)
       
        cut.append(total[maxi])
        del total[maxi]
        del count[maxi]
    
    for indice,palavra in enumerate(total):
        total[palavra]=indice
            
    return X,total
Example #50
 def nms(boxes, T = 0.5):
     if len(boxes) == 0:
         return []
     boxes = boxes.astype("float")
     pick = []
     x1 = boxes[:,0]
     y1 = boxes[:,1]
     x2 = boxes[:,2]
     y2 = boxes[:,3]    
     area = (x2 - x1 + 1) * (y2 - y1 + 1)
     idxs = sp.argsort(y2)    
     while len(idxs) > 0:
         last = len(idxs) - 1
         i = idxs[last]
         pick.append(i)
         xx1 = sp.maximum(x1[i], x1[idxs[:last]])
         yy1 = sp.maximum(y1[i], y1[idxs[:last]])
         xx2 = sp.minimum(x2[i], x2[idxs[:last]])
         yy2 = sp.minimum(y2[i], y2[idxs[:last]])
         w = sp.maximum(0, xx2 - xx1 + 1)
         h = sp.maximum(0, yy2 - yy1 + 1)
         I = w * h
         #overlap_ratio = I / area[idxs[:last]]
         overlap_ratio = I /(area[i] +  area[idxs[:last]] - I)
         idxs = sp.delete(idxs, sp.concatenate(([last], sp.where(overlap_ratio > T)[0])))
     return boxes[pick].astype("int")
Example #51
def trim_fftconvolve(image):
    """
    Removes invalid rows and columns from a convolved image after
    fftconvolve with the "same" option.

    Arguments:
    - `image`: input image for trimming
    """

    # remove invalid edge
    image = delete(image, 0, 0)
    image = delete(image, 0, 1)
    image = delete(image, image.shape[0]-1, 0)
    image = delete(image, image.shape[1]-1, 1)

    return image
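A usage sketch, assuming the function above is in scope and that the bare delete refers to numpy's (or old scipy's) delete:

import numpy as np
from scipy.signal import fftconvolve

image = np.random.rand(8, 8)
kernel = np.ones((3, 3)) / 9.0
blurred = fftconvolve(image, kernel, mode='same')
trimmed = trim_fftconvolve(blurred)
# one invalid row and column is shaved off each edge
assert trimmed.shape == (6, 6)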
Example #52
def removeAttribute(X,y,attribute,attributeNames):
    attributeNamesWithoutAttr = np.copy(attributeNames)
    attributeNamesWithoutAttr = numpy.delete(attributeNames,attribute).tolist()
    yWithoutAttr = X[:,attribute]
    XWithoutAttr = np.copy(X)
    XWithoutAttr = scipy.delete(XWithoutAttr,attribute,1)
    return (XWithoutAttr, yWithoutAttr,attributeNamesWithoutAttr)
Example #53
def do_this_when_the_mouse_is_clicked(this_event):
    global coords_array 
    global point_handles_array
    x = this_event.xdata
    y = this_event.ydata
    ### If the click is outside the range, then clear figure and points list
    if this_event.xdata is None: # This means we clicked outside the axis
        clear_the_figure_and_empty_points_list()
    else: # We clicked inside the axis
        number_of_points = scipy.shape(coords_array)[0]
        if number_of_points > 0:
            point_to_be_deleted = check_if_click_is_on_an_existing_point(x,y)  
            if point_to_be_deleted != -1: # We delete a point
                # We will delete that row from coords_array. The rows are axis 0
                coords_array = scipy.delete(coords_array,point_to_be_deleted,0)
                # We will also hide that point on the figure, by finding its handle
                handle_of_point_to_be_deleted = point_handles_array[point_to_be_deleted]
                pylab.setp(handle_of_point_to_be_deleted,visible=False)
                # Now that we have erased the point with that handle,
                # we can delete that handle from the handles list
                point_handles_array = scipy.delete(point_handles_array,point_to_be_deleted)
            else:  # We make a new point
                coords_array = scipy.vstack((coords_array,[x,y]))
                this_point_num = scipy.shape(coords_array)[0]
                new_point_handle = pylab.plot(x,y,'*',color='blue')
                point_handles_array = scipy.append(point_handles_array,new_point_handle) 
        if number_of_points == 0:
            coords_array = scipy.array([[x,y]])
            this_point_num = scipy.shape(coords_array)[0]
            new_point_handle = pylab.plot(x,y,'*',color='blue')
            point_handles_array = scipy.append(point_handles_array,new_point_handle)
        ### Now plot the statistics that this program is demonstrating
        number_of_points = scipy.shape(coords_array)[0] # Recount how many points we have now
        if number_of_points > 1: 
            plot_the_mean_std_and_normal()
        ### Finally, check to see whether we have fewer than two points
        ### as a result of any possible point-deletions above.
        ### If we do, then delete the stats info from the plot, 
        ### as it isn't meaningful for just one data point
        number_of_points = scipy.shape(coords_array)[0]  
        if number_of_points < 2: # We only show mean and std if there are two or more points
            pylab.setp(handle_of_normal_curve_plot,visible=False)
            pylab.setp(handle_of_mean_plot,visible=False)
            pylab.setp(handle_of_std_lines,visible=False)        
            pylab.xlabel('')
        # Set the axis back to its original value, in case Python has changed it during plotting
        pylab.axis([-axis_x_range, axis_x_range, axis_y_lower_lim, axis_y_upper_lim])
    def fit_scatter(ap_ind, stat) :
        """ Derives a fit of the scatter from the given singe aperture
        statistics dictionary. """

        all_deriv=all_linear_terms(config.mphotref.rms_fit_param, stat)[0]
        derivatives=all_deriv[:]
        num_free_coef=len(derivatives)
        alllogscatter=log10(array(stat['scatter']))
        logscatter=alllogscatter
        start_source_count=len(logscatter)
        error_func=lambda coef: dot(coef, derivatives)-logscatter
        deriv_func=lambda coef: derivatives
        initial_guess=zeros(num_free_coef)
        for rej_iter in range(config.mphotref.rej_iterations) :
            if len(logscatter)<num_free_coef : 
                print sph_header
                print len(logscatter)
                raise Error.Numeric("Rejecting outliers resulted in less "
                                    "points than fit parameters when "
                                    "fitting log10(magnitude scatter) "
                                    "for aperture %d of field %s, camera %s,"
                                    " %s frames."%
                                    (ap_ind, sph_header['OBJECT'],
                                     sph_header['CMPOS'],
                                     sph_header['IMAGETYP']))
            coefficients, covariance, info_dict, msg, status=leastsq(
                error_func, initial_guess, Dfun=deriv_func, col_deriv=1,
                full_output=1)
            if status not in [1, 2, 3, 4] :
                raise Error.Numeric("Linear least squares fitting of the "
                                    "scatter of the single photometry "
                                    "magnitudes for aperture %d of field %s,"
                                    " camera %s, %s frames failed: %s"%
                                    (ap_ind, sph_header['OBJECT'],
                                     sph_header['CMPOS'],
                                     sph_header['IMAGETYP'], msg))
            bad_ind, fit_res=rejected_indices(info_dict['fvec'])
            if not bad_ind : break
            if rej_iter==config.mphotref.rej_iterations-1 : break
            # drop the rejected points, then refit on the next pass
            derivatives=map(lambda d : delete(d, bad_ind), derivatives)
            logscatter=delete(logscatter, bad_ind)
        if db is not None :
            fit_to_db(coefficients, ap_ind, fit_res, start_source_count,
                      len(logscatter))
        return alllogscatter-dot(coefficients, all_deriv), fit_res
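fit_scatter interleaves linear least squares with outlier rejection until no points are rejected. A self-contained sketch of the same iterate-fit-reject pattern, swapping the module's rejected_indices for a simple 3-sigma clip (all names here are illustrative):

import numpy as np
from scipy.optimize import leastsq

def clipped_linear_fit(design, target, n_iter=5, sigma=3.0):
    # fit target ~ dot(coef, design) and iteratively drop points whose
    # residual exceeds sigma times the RMS residual
    coef = np.zeros(design.shape[0])
    for _ in range(n_iter):
        error = lambda c, d=design, t=target: np.dot(c, d) - t
        coef, _ier = leastsq(error, coef)
        resid = error(coef)
        keep = np.abs(resid) <= sigma * np.sqrt(np.mean(resid ** 2))
        if keep.all():
            break
        design, target = design[:, keep], target[keep]
    return coef

x = np.linspace(0, 1, 20)
y = 2 * x + 1
y[7] += 5.0   # inject one outlier
print(clipped_linear_fit(np.vstack([x, np.ones_like(x)]), y))   # ~ [2. 1.]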
 def parseData(self):
     """Loads the whitespace-delimited data file; returns the full
     matrix, the feature matrix without column 7, and column 7 as
     the label vector."""
     X = np.genfromtxt(self.sFilePath)
     X_unlabled = scipy.delete(X, 7, 1)
     y = X[:,7]
     return [X, X_unlabled, y]
Beispiel #56
 def findBestRes(self, mat):
     """Finds the best achievable total: pick entry i from the first
     row, delete column i and the first row, recurse on the rest, and
     memoize each sub-matrix under its encoded string key."""
     if len(mat) == 1:
         return mat[0][0]

     matStr = self.matCoder(mat)
     if matStr in self.memo:
         return self.memo[matStr]

     maxRes = -np.inf
     for i in range(len(mat)):
         # remove column i, then the consumed first row
         tempMat = sci.delete(mat, i, 1)
         tempRes = mat[0][i] + self.findBestRes(sci.delete(tempMat, 0, 0))
         if tempRes >= maxRes:
             maxRes = tempRes

     self.memo[matStr] = maxRes
     return maxRes
def betaExperiment(coefs, exps, splitSize, n_neighbour):
    Dimension = 9
    featureDim = Dimension - 1
    Density = 200
    predictions = []
    myMetricAccuracy_test = []
    myMetricAccuracy_train = []

    path = "./abalone.txt"

    for c in coefs:
        for e in exps:
            Labels = list(range(1, 30))
            myParser = Paeser(similarDataFilePath=path, splitSize=splitSize, Flag=1, Dimension=Dimension, Labels=Labels)
            Data = myParser.DataGen(Density)
            # beta scales with the training-set size: beta = c * n**e
            beta = c * (len(Data[1][0]) ** e)
            mLearner = Learner(beta, Data[0][0], Data[0][1], featureDim - 1)
            lambdaVec = mLearner.computeLambda(len(Data[0][1]))
            M = mLearner.compute_M(np.ravel(lambdaVec))
            mLearner.Metric = M
            mKNN = KNNClassifier(M, n_neighbour)

            # project onto the nearest positive-definite matrix if needed
            if not mLearner.is_pos_def(M):
                M = mKNN._getAplus(M)
                mKNN.setM(M)
            # column 7 holds the label; the remaining columns are features
            X_train = Data[1][0]
            X_train = np.reshape(X_train, (len(X_train), 8))
            y_train = X_train[:, 7]
            X_train = scipy.delete(X_train, 7, 1)
            X_test = Data[1][1]
            X_test = np.reshape(X_test, (len(X_test), 8))
            y_test = X_test[:, 7]
            X_test = scipy.delete(X_test, 7, 1)
            predictions = []
            mKNN.fit(X_train, y_train)
            mKNN.setM(M)
            predictions = mKNN.predict(X_test)
            myMetricAccuracy_test.append([mLearner.getAccuracy(y_test, predictions), [c, e]])
            predictions = mKNN.predict(X_train)
            myMetricAccuracy_train.append(mLearner.getAccuracy(y_train, predictions))
            print("done: c=%s, e=%s" % (c, e))
    return sorted(myMetricAccuracy_test, key=getKey), sorted(myMetricAccuracy_train)