Example #1
0
    def __profilePrep(self, input_dir):
        Io.deleteDirectory(self.path_provider.output_tmp_path)
        Io.makeDirectories(self.path_provider.output_tmp_path)
        Io.copyFiles(src=input_dir,
                     dest=self.path_provider.output_tmp_path,
                     file_names=self.file_names,
                     file_name_regex="*.tif")

        print('Profiling')
        Io.deleteDirectory(self.path_provider.output_data_path)
        Io.makeDirectories(self.path_provider.output_data_path)
        Tr.profileToProfile(input_data=self.path_provider.output_tmp_path,
                            out_path=self.path_provider.output_data_path)

        Io.copyFiles(src=input_dir,
                     dest=self.path_provider.output_data_path,
                     file_names=self.file_names,
                     file_name_regex="*.TFW")

        # print('Translating into one file...')
        # Io.makeDirectories(self.path_provider.output_data_path)
        # Tr.translateIntoOneFile(input_data=self.path_provider.output_tmp2_path,
        #                         out_path=self.path_provider.output_data_path)

        print('Cleaning temp dirs...')
        Io.deleteDirectory(self.path_provider.output_tmp_path)
Example #2
0
    def translate(self, chainpos, angle, axis):
        """Eigentlich eine transrotate funktion
        Zuerst wird translatiert dann rotiert
        
        """

        if chainpos == 1:

            RotMat  = tr.rotation_matrix(angle, axis)

            oldori  = self.orients[0]
            newori  = np.dot(RotMat,oldori)

            self.orients[0] = newori

            return self


        if chainpos > 1:

            point   = self.roots[chainpos-1]  # change here
            RotMat  = tr.rotation_matrix(angle, axis, point)

            oldroot = self.roots[chainpos-1].toggle_trans()
            newroot = np.dot(RotMat,oldroot)[:3,:1]  # times length

            oldori  = self.orients[chainpos-1]
            newori  = np.dot(tr.unit_vector(RotMat),oldori)

            self.roots[chainpos-1]   = newroot
            self.orients[chainpos-1] = newori

            return self
Example #3
0
    def profileMerge(self, input_dir):
        self.__profilePrep(input_dir)

        print('Merging...')
        Io.deleteFile(self.path_provider.merged_file)
        Io.makeDirectories(self.path_provider.output_merged_path)
        Tr.gdalMerge(input_data=self.path_provider.output_data_path,
                     out_file=self.path_provider.merged_file)
Example #4
0
 def generate_datasets(self):
     self.ip_data_folder = IP_Folder.get()
     self.op_data_folder = OP_Folder.get()
     #
     #op_mvavg_data_folder = OP_Folder.get()+'\\MovingAverage'
     if self.dataset_radio_button == 2:
         self.op_sqrt_data_folder = self.op_data_folder + '\\SquareRoot'
         self.create_folder(self.op_sqrt_data_folder)
         algos.square_root(self.ip_data_folder, self.op_sqrt_data_folder)
Example #5
0
    def basicMerge(self, input_dir):
        Io.deleteDirectory(self.path_provider.output_data_path)
        Io.makeDirectories(self.path_provider.output_data_path)
        Io.copyFiles(src=input_dir,
                     dest=self.path_provider.output_data_path,
                     file_names=self.file_names)

        print('Merging...')
        Io.deleteFile(self.path_provider.merged_file)
        Io.makeDirectories(self.path_provider.output_merged_path)
        Tr.gdalMerge(input_data=self.path_provider.output_data_path,
                     out_file=self.path_provider.merged_file,
                     is_pct=True)
Example #6
0
def process_wine(filename):
    red_wine = numpy.genfromtxt(filename, delimiter=";", skip_header=1)

    # deletes rows containing NaN values
    red_wine = red_wine[~numpy.isnan(red_wine).any(axis=1)]

    # classifies wines into 1 and 0
    for row in red_wine:
        if row[-1] > 5:
            row[-1] = 1
        else:
            row[-1] = 0

    # removes outlier rows for selected columns; deleting all indices in one
    # call avoids the index shifting caused by row-by-row deletion
    for i in (3, 4, 9):
        outlier_rows = Transformations.find_outliers(red_wine[:, i], 1.8)
        red_wine = numpy.delete(red_wine, outlier_rows, axis=0)

    # adds features
    # y = red_wine[:, -1]
    # x = red_wine[:, 0:-1]
    # x = Transformations.add_feature(x, [1, 4])
    # red_wine = numpy.concatenate((x, y.reshape(y.shape[0], 1)), axis=1)

    # selects features
    # red_wine = Transformations.select_feature(red_wine, [8, 3])

    # normalizes each column
    for i in range(red_wine.shape[1]):
        red_wine[:, i] = normalize(red_wine[:, i])
    return red_wine
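
A hedged usage sketch: assuming the input file follows the UCI red-wine CSV layout (semicolon-separated, one header row, quality in the last column), the function returns a normalized matrix whose last column encodes the binary quality class. The filename below is an assumption.

# hypothetical usage; "winequality-red.csv" is an assumed filename
data = process_wine("winequality-red.csv")
print(data.shape)  # rows kept x (feature columns + quality-label column)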
Example #7
0
def commandLinePrompt(current_steps, current_ingredients):
    # menu choices that map directly onto a Transformations.main mode
    transform_modes = {
        5: 3,   # Healthier
        6: 4,   # Less Healthy
        7: 1,   # Vegetarian
        8: 2,   # Non-Vegetarian
        10: 5,
    }
    while True:
        user_input = float(commandLineIntro(current_steps, current_ingredients))

        # Ingredients
        if user_input == 0:
            Ingredients.main(current_ingredients)
        elif user_input == 1:
            Steps.tools(current_steps, current_ingredients)
        elif user_input == 2:
            Steps.methods(current_steps, current_ingredients)
        elif user_input == 3:
            Steps.steps(current_steps, current_ingredients)
        elif user_input == 4:
            current_ingredients = ServingSizeTransform.main(current_ingredients)
        elif user_input in transform_modes:
            current_ingredients, current_steps = Transformations.main(
                transform_modes[user_input], current_steps, current_ingredients)
        else:
            # 9 (and any unrecognized choice) ends the prompt
            return
Example #8
0
def _shift_sys(md_sys, radius, radius_buffer=1):
    """
    """
    # enlarge radius so atoms do not clash
    radius += radius_buffer
    rn_pos = agm.points_on_sphere(npoints=1, ndim=3, radius=radius)[0]
    mx_trans = cgt.translation_matrix(rn_pos)
    sys_add_all_atms = list(range(len(md_sys.atoms)))
    md_sys.mm_atm_coords(-1, mx_trans, False, *sys_add_all_atms)
Example #9
0
 def transform(self):  # Transforms ECI to obs
     self.obs = np.copy(
         self.X)  # Copies the values of X just to have the same dimensions
     for i in range(0, len(self.sats)):  # Goes through each sat
         for i2 in range(0, np.ma.size(self.X, axis=1)):  # For all time
             self.obs[i][i2] = tr.ECI2obs(
                 self.X[i][i2][0], self.X[i][i2][1], self.X[i][i2][2],
                 self.gs_llh[0], self.gs_llh[1], self.gs_llh[2],
                 self.time[i2])  # This function is in Transformations.py
Example #10
0
def main():
    img = cv2.imread('Lenna.jpg', 3)
    print(img.shape)
    img = tr.RGB2YCBCR(img)
    Y, Cr, Cb = cv2.split(img)

    pathlib.Path('./SecondAttempt').mkdir(parents=True, exist_ok=True)

    (Sl, (Sh1, Sv1, Sd1), (Sh2, Sv2, Sd2)) = pywt.wavedec2(Y, 'db1', level=2)

    ReducedCb = downscale_local_mean(Cb, (2, 2))
    ReducedCr = downscale_local_mean(Cr, (2, 2))

    # work on copies: plain assignment would alias ReducedCb/ReducedCr, so
    # zeroing entries in one half would corrupt the other (and the original)
    CbPlus = ReducedCb.copy()
    CbMinus = ReducedCb.copy()

    for i in range(ReducedCb.shape[0]):
        for j in range(ReducedCb.shape[1]):
            if ReducedCb[i, j] < 0:
                CbPlus[i, j] = 0
            elif ReducedCb[i, j] > 0:
                CbMinus[i, j] = 0

    CrPlus = ReducedCr.copy()
    CrMinus = ReducedCr.copy()

    for i in range(ReducedCr.shape[0]):
        for j in range(ReducedCr.shape[1]):
            if ReducedCr[i, j] < 0:
                CrPlus[i, j] = 0
            elif ReducedCr[i, j] > 0:  # was ReducedCb: a copy-paste slip
                CrMinus[i, j] = 0

    ReducedCbMinus = downscale_local_mean(CbMinus, (2, 2))

    # Sd1 = ReducedCbMinus
    # Sh2 = CrPlus
    # Sv2 = CbPlus
    # Sd2 = CrMinus

    NewYSecondTry = pywt.waverec2(
        (Sl, (Sh1, Sv1, ReducedCbMinus), (CrPlus, CbPlus, CrMinus)), 'db1')
    cv2.imwrite("./SecondAttempt/NewYSecondTry.jpg", NewYSecondTry)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    h = hf.Halftone('./SecondAttempt/NewYSecondTry.jpg')
    h.make(angles=[0, 15, 30, 45],
           antialias=True,
           percentage=10,
           sample=1,
           scale=2,
           style='grayscale')
Example #11
0
def arb_rot_matrix(vector):
    """
    Rotate a vector so it aligns with a randomly generated vector. Return the
    rotation matrix.
    """
    vt_u = agv.get_unit_vt(vector)  # scale vector to length of 1
    # generate random vector of length 1
    rand_vt = points_on_sphere(1, ndim=3, radius=1)[0]
    # get angle phi between vt_u and rand_vt
    phi = agv.get_ang(vt_u, rand_vt)
    rot_axis = np.cross(vt_u, rand_vt)
    Mr = cgt.rotation_matrix(phi, rot_axis)
    return Mr
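
A small sanity-check sketch, assuming cgt is Gohlke's transformations module (its rotation_matrix returns a 4x4 homogeneous matrix): a pure rotation must preserve the length of the input vector.

import numpy as np

v = np.array([1.0, 2.0, 3.0])
Mr = arb_rot_matrix(v)
v_rot = Mr.dot(np.append(v, 1.0))[:3]  # homogeneous multiply, drop w
print(np.allclose(np.linalg.norm(v_rot), np.linalg.norm(v)))  # True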
Example #12
0
def get_dihedral(ptI, ptJ, ptK, ptL, return_cross=False):
    """
    Define a dihedral/improper angle between the points I, J, K and L via the
    planes IJK and JKL (if an improper is intended, mind the order of the atoms).
    Sources: http://cbio.bmt.tue.nl/pumma/index.php/Theory/Potentials
             http://www.vitutor.com/geometry/distance/line_plane.html
             http://kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/
    """
    if not isinstance(ptI, np.ndarray):
        ptI = np.array(ptI)

    if not isinstance(ptJ, np.ndarray):
        ptJ = np.array(ptJ)

    if not isinstance(ptK, np.ndarray):
        ptK = np.array(ptK)

    if not isinstance(ptL, np.ndarray):
        ptL = np.array(ptL)

    # plane IJK
    v1 = ptI - ptJ
    v2 = ptJ - ptK
    # the cross product v1xv2 is a vector normal to both vectors (i.e. to the plane)
    cp1 = np.cross(v1, v2)

    # plane JKL
    v1 = ptJ - ptK
    v2 = ptJ - ptL
    cp2 = np.cross(v1, v2)

    # angle between two planes is the same as the two vectors which are orthogonal
    # to each plane
    if return_cross is True:
        return (cgt.angle_between_vectors(cp1, cp2), cp1, cp2)
    else:
        return cgt.angle_between_vectors(cp1, cp2)
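
A minimal usage sketch with four hypothetical points, assuming cgt is Gohlke's transformations module (its angle_between_vectors returns radians):

import numpy as np

# plane IJK has normal +z and plane JKL has normal -y,
# so the dihedral between them is 90 degrees
ptI, ptJ, ptK, ptL = [0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 0, 1]
print(np.degrees(get_dihedral(ptI, ptJ, ptK, ptL)))  # 90.0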
Example #13
0
 def transforms(self):
     composed = transforms.Compose([
         tfs.Rescale(out_size=self.rescale_shape),
         tfs.ReCrop(out_size=self.nn_input_image_shape),
         tfs.Rotate(rotations=self.rotation_angles,
                    rotation_probabilities=self.rotation_angle_probabilities),
         tfs.Flip(flip_probabilites=self.flip_hor_ver_probabilities),
         tfs.Apply_Gaussian_filter(probabity=self.gaussian_filter_probability),
         tfs.Apply_Unsharpmask_filter(probabity=self.unsharpmask_filter_probability)
     ])
     return composed
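
A hedged usage sketch: Compose (torchvision-style) chains the tfs transforms into a single callable, so one sample can be pushed through the whole augmentation pipeline; the owner-class and sample names here are hypothetical.

# hypothetical usage of the composed augmentation pipeline
augmenter = Augmenter(transformation_parameters)  # hypothetical owner class
composed = augmenter.transforms()
augmented = composed(sample)  # applies Rescale ... Apply_Unsharpmask_filter in order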
Example #14
0
def get_angle(ptI, ptJ, ptK):
    """
    Bla.

    Get angle between three points I, J and K.
    """
    if not isinstance(ptI, np.ndarray):
        ptI = np.array(ptI)

    if not isinstance(ptJ, np.ndarray):
        ptJ = np.array(ptJ)

    if not isinstance(ptK, np.ndarray):
        ptK = np.array(ptK)

    v1 = ptI - ptJ
    v2 = ptK - ptJ
    return cgt.angle_between_vectors(v1, v2)
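
A quick sanity check with hypothetical points, measuring the angle at the vertex J:

import numpy as np

# right angle at the origin between the x- and y-axis directions
print(np.degrees(get_angle([1, 0, 0], [0, 0, 0], [0, 1, 0])))  # 90.0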
Example #16
0
def stateVectorUpdate(stateVec, del_t):

    transition_quat = getTransitionQuat(stateVec[10:13], del_t)

    result_quat = trnsfrm.quaternion_multiply(stateVec[3:7], transition_quat)

    return np.array([
        [stateVec[0] + (stateVec[7] * del_t)],  #0
        [stateVec[1] + (stateVec[8] * del_t)],  #1
        [stateVec[2] + (stateVec[9] * del_t)],  #2
        [result_quat[0]],  #3
        [result_quat[1]],  #4
        [result_quat[2]],  #5
        [result_quat[3]],  #6
        [stateVec[7]],  #7
        [stateVec[8]],  #8
        [stateVec[9]],  #9
        [stateVec[10]],  #10
        [stateVec[11]],  #11
        [stateVec[12]]
    ])  #12
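
A hedged propagation sketch, assuming the 13-element state vector is laid out as [position 0-2, attitude quaternion 3-6, velocity 7-9, angular rate 10-12] (as the indices above suggest) and that getTransitionQuat and trnsfrm.quaternion_multiply come from the surrounding module:

import numpy as np

# hypothetical one-step propagation with del_t = 0.1 s
state = np.zeros(13)
state[3:7] = [1.0, 0.0, 0.0, 0.0]  # unit quaternion, convention per getTransitionQuat
state[7:10] = [1.0, 0.0, 0.0]      # 1 m/s along x
new_state = stateVectorUpdate(state, 0.1)
print(new_state.shape)  # (13, 1); the x position has advanced by 0.1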
Example #17
0
 def find_passes(self, grav, minel):
     first = 1
     counter = 0
     for i in range(0, len(self.sats)):
         tcomp = datetime(2000, 1, 1, 0, 0, 0, 0)
         for i2 in range(0, np.ma.size(self.X, axis=1)):
             if self.time[i2] - tcomp > timedelta(0, 20 * 60):
                 if self.obs[i][i2][1] > 0:
                     counter += 1
                     if i2:
                         i2 -= 1
                     temp = self.time[i2]
                     start = 1
                     passx = np.array(
                         [i, datetime(2000, 1, 1, 0, 0, 0, 0), 0, 0, 0])
                     el = 1
                     while el > 0 or start:
                         r = pr.sgp4Prop_fine(self.line1[i], self.line2[i],
                                              temp, grav)
                         azi, el, ran = tr.ECI2obs(r[0], r[1], r[2],
                                                   self.gs_llh[0],
                                                   self.gs_llh[1],
                                                   self.gs_llh[2], temp)
                         passx = np.vstack(
                             (passx, (i, self.time[i2], azi, el, ran)))
                         if start:
                             if el > 0:
                                 start = 0
                         temp += timedelta(0, fine)
                     maxel = np.amax(passx, axis=0)[3]
                     if maxel > minel:
                         if first:
                             self.passes = np.copy(passx)
                             first = 0
                         else:
                             self.passes = np.vstack((self.passes, passx))
                     del passx
                     tcomp = temp
     return counter
Example #18
0
    def basicTile(self, is_pct, zoom):
        print('Translating')
        Io.deleteFile(self.path_provider.translated_file)
        Tr.gdalTranslate(input_file=self.path_provider.merged_file,
                         out_file=self.path_provider.translated_file,
                         is_pct=is_pct)

        print('Warping')
        Io.deleteFile(self.path_provider.warped_file)
        Tr.gdalWarp(in_file=self.path_provider.translated_file,
                    out_file=self.path_provider.warped_file)

        print('Tiling')
        Io.deleteDirectory(self.path_provider.output_tiles_path)
        Io.makeDirectories(self.path_provider.output_tiles_path)
        Tr.gdal2Tiles(in_file=self.path_provider.warped_file,
                      out_dir=self.path_provider.output_tiles_path,
                      zoom=zoom)
Example #19
 def __init__(self, transformation_parameters):
     self.nn_input_image_shape = transformation_parameters['nn_input_image_shape']
     crop_pixel_count = transformation_parameters['crop_pixel']
     self.rescale_shape = [x + y for x, y in zip(self.nn_input_image_shape, crop_pixel_count)]
     self.rotation_angles = transformation_parameters['rotation_angles']
     self.rotation_angle_probabilities = transformation_parameters['rotaion_angle_probabilities']
     self.flip_hor_ver_probabilities = transformation_parameters['flip_hor_ver_probabilities']
     self.gaussian_filter_probability = transformation_parameters['gaussian_filter_probability']
     self.unsharpmask_filter_probability = transformation_parameters['unsharp_mask_filter_probability']

     self.Rescaler = tfs.Rescale(out_size=self.rescale_shape)
     self.ReCropper = tfs.ReCrop(out_size=self.nn_input_image_shape)
     self.Rotator = tfs.Rotate(rotations=self.rotation_angles,
                               rotation_probabilities=self.rotation_angle_probabilities)
     self.Flipper = tfs.Flip(flip_probabilites=self.flip_hor_ver_probabilities)
     self.Gaussian_filter = tfs.Apply_Gaussian_filter(probabity=self.gaussian_filter_probability)
     self.Unsharpmask_filter = tfs.Apply_Unsharpmask_filter(probabity=self.unsharpmask_filter_probability)
Example #20
0
def main():
    state = pd.read_spss('/Users/kellenbullock/Desktop/Geographic Analysis II/Data/5303_EX_A.sav')
    
    Counties = state.query("Scale == 'Counties'")
    Schools = state.query("Scale == 'Schools'")
    Tracts = state.query("Scale == 'Tracts'")
    
    
    assigned_var_c = Counties[['Pct_Black', 'Pct_Two_Plus', 'Pct_SNAP', 'Pct_FIRE_I', 'Pct_Poverty', 'Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    assigned_var_s = Schools[['Pct_Black', 'Pct_Two_Plus', 'Pct_SNAP', 'Pct_FIRE_I', 'Pct_Poverty', 'Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    assigned_var_t = Tracts[['Pct_Black', 'Pct_Two_Plus', 'Pct_SNAP', 'Pct_FIRE_I', 'Pct_Poverty', 'Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    
    assigned_var_t = assigned_var_t.reset_index()
    assigned_var_s = assigned_var_s.reset_index()
    assigned_var_s = assigned_var_s.drop(columns=['index'])
    assigned_var_t = assigned_var_t.drop(columns=['index'])
    
    # New Variables 
    var_c = Counties[['Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    var_s = Schools[['Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    var_t = Tracts[['Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    
    var_t = var_t.reset_index()
    var_s = var_s.reset_index()
    var_t = var_t.drop(columns=['index'])
    var_s = var_s.drop(columns=['index'])
    
    # 1.
    #EDA.figures(var_c, 'Counties')
    #EDA.figures(var_t, 'Tracts')
    #EDA.figures(var_s, 'Schools')
    
    print('=====County=======')
    #EDA.Descrptives(Counties, var_c)
    print('======== Tracts =========')
    #EDA.Descrptives(Tracts, var_t)
    print('====== School Districts =======')
    #EDA.Descrptives(Schools, var_s)
    
    # 1 part b:
    #test_trans(assigned_var_c, 'Pct_Black')
    print('************')
    #test_trans(assigned_var_c, 'Pct_Hispanic')
    
    # 2.
    # This will make a Pearson's r correlation matrix at the County scale.
    # These do not work in the Spyder IDE; please see the Jupyter notebook for outputs.
    corr = assigned_var_c.corr()
    corr.style.background_gradient(cmap='coolwarm').set_precision(3)
    
    EDA.df_to_pdf(corr, 'Matrix_1')
    
    ''' Easy way to order correlations, but without signs:
        correlations = assigned_var_c.corr().abs()
        stack = correlations.unstack()
        stack_order = stack.sort_values(kind='quicksort')
    '''
    # 2. a. The 8 strongest Pearson correlations. There was no easy way to do
    # this; I had to pull everything out by hand.
    corr_table = {
        'Variables': ['Pct_Poverty / Pct_SNAP', 'Pct_Unemp / Pct_SNAP', 'Pct_White / Pct_Unemp', 'Pct_White / Pct_SNAP', 'Pct_Unemp / Pct_Poverty', 'Pct_White / Pct_Two_Plus', 'Pct_White / Pct_Poverty', 'Pct_Fire_I / Pct_BlueCollar_O'],
        'Correlation': [0.757, 0.722, - 0.672, - 0.645, 0.605, - 0.599, - 0.593, - 0.574]
        }
    
    all_scale = {
         'Variables': ['Pct_Poverty / Pct_SNAP', 'Pct_Unemp / Pct_SNAP', 'Pct_White / Pct_Unemp', 'Pct_White / Pct_SNAP', 'Pct_Unemp / Pct_Poverty', 'Pct_White / Pct_Two_Plus', 'Pct_White / Pct_Poverty', 'Pct_Fire_I / Pct_BlueCollar_O'],
        'County': [0.757, 0.722, - 0.672, - 0.645, 0.605, - 0.599, - 0.593, - 0.574],
        'Tract': [0.773, 0.679, -0.474, -0.564, 0.628, -0.296, -0.495, -0.584],
        'School District': [0.732, 0.590, -0.440, -0.515, 0.494, -0.569, -0.442, -0.380]
        }
    
    pearson_r_table = pd.DataFrame(corr_table)
    pearson_r_table.to_excel('County Correlation.xlsx')
    EDA.df_to_pdf(pearson_r_table, 'Decending_corr')
    
    all_scale = pd.DataFrame(all_scale)
    all_scale.to_excel('Scale_Correlation.xlsx')
    EDA.df_to_pdf(all_scale, 'Scale_Correlation')
    
    # 3. 
    assigned_c_repub = Counties[['Pct_Repub', 'Pct_Black', 'Pct_Two_Plus', 'Pct_SNAP', 'Pct_FIRE_I', 'Pct_Poverty', 'Pct_Unemp', 'Med_HomeValue', 'Pct_White', 'Pct_BlueCollar_O', 'Pct_Hispanic']]
    
    assigned_c_repub = assigned_c_repub.reset_index()
    assigned_c_repub = assigned_c_repub.drop(columns=['index'])
    assigned_c_repub['Pct_Black'] = Transformations.log_trans(assigned_c_repub, 'Pct_Black')
    assigned_c_repub['Pct_Hispanic'] = Transformations.log_trans(assigned_c_repub, 'Pct_Hispanic')
    
    corr2 = assigned_c_repub.corr()
    corr2.style.background_gradient(cmap='coolwarm').set_precision(3)
    EDA.df_to_pdf(corr2, 'Matrix_2')
    
    # c.
    chosen = corr2[['Pct_Repub', 'Pct_Poverty', 'Pct_Unemp', 'Pct_White', 'Pct_SNAP']]
    x = ['Pct_Repub', 'Pct_Poverty', 'Pct_Unemp', 'Pct_White', 'Pct_SNAP']
    y = 'Pct_Repub'
    corr_scatter(chosen, x, y)
    
    # c. 4 scatter plots with y axis being pct_republican
    
    chosen = assigned_c_repub[['Pct_Repub', 'Pct_Poverty', 'Pct_Unemp', 'Pct_White', 'Pct_SNAP']]
    # Running partial correlation 4 times:
    Partial_Corr.partial_corr(chosen[['Pct_Repub', 'Pct_Poverty']])
    Partial_Corr.partial_corr(chosen[['Pct_Repub', 'Pct_White']])
    Partial_Corr.partial_corr(chosen[['Pct_Repub', 'Pct_SNAP']])
    Partial_Corr.partial_corr(chosen[['Pct_Repub', 'Pct_Unemp']])
    
    
    # 5.
    bivar_regres(chosen['Pct_Repub'], chosen['Pct_Poverty']) # Model 1
    bivar_regres(chosen['Pct_Repub'], chosen['Pct_SNAP'])    # Model 2
    bivar_regres(chosen['Pct_Repub'], chosen['Pct_White'])   # Model 3
    bivar_regres(chosen['Pct_Repub'], chosen['Pct_Unemp'])   # Model 4
Example #21
0
    #===============================================================================
    sys_cutfrom = ag_lammps.read_lmpdat(args.lmpdat, dcd=args.dcd, frame_idx_start=args.f, frame_idx_stop=args.f)

    # replicate cell if desired
    if args.rep is not None:
        # read last frame since the frame selection was done with read_lmpdat already
        sys_cutfrom.replicate_cell(n_start=args.rep[0], n_stop=args.rep[1], direction="a", frame_id=-1, adjust_box=True)
        sys_cutfrom.replicate_cell(n_start=args.rep[2], n_stop=args.rep[3], direction="b", frame_id=-1, adjust_box=True)
        sys_cutfrom.replicate_cell(n_start=args.rep[4], n_stop=args.rep[5], direction="c", frame_id=-1, adjust_box=True)
        sys_cutfrom.fetch_molecules_by_bonds()
        sys_cutfrom.mols_to_grps()

    # shift sys_cutfrom by given vector
    if args.shft is not None:
        args.shft = np.array(args.shft)
        M_shft = cgt.translation_matrix(args.shft)
        atm_idxs = list(range(len(sys_cutfrom.atoms)))
        sys_cutfrom.mm_atm_coords(-1, M_shft, False, *atm_idxs)
        del (atm_idxs, M_shft)

    #===============================================================================
    # PREPARE THE CUTTING SHAPE SYSTEM
    #===============================================================================
    if args.lmpdat_cutshape is not None or args.dcd_cutshape is not None:
        sys_cutshape = ag_lammps.read_lmpdat(args.lmpdat_cutshape, dcd=args.dcd_cutshape, frame_idx_start=args.f_cutshape, frame_idx_stop=args.f_cutshape)

        # replicate cell if desired
        if args.rep_cutshape is not None:
            sys_cutshape.replicate_cell(n_start=args.rep_cutshape[0], n_stop=args.rep_cutshape[1], direction="a", frame_id=-1, adjust_box=True)
            sys_cutshape.replicate_cell(n_start=args.rep_cutshape[2], n_stop=args.rep_cutshape[3], direction="b", frame_id=-1, adjust_box=True)
            sys_cutshape.replicate_cell(n_start=args.rep_cutshape[4], n_stop=args.rep_cutshape[5], direction="c", frame_id=-1, adjust_box=True)
Example #22
0
def get_rotational_matrix(molid,
                          atom_idx1,
                          atom_idx2,
                          axis_start,
                          axis_end,
                          frame_id=-1):
    """
    Get the matrix that rotates the bond between atom 1 and atom 2 onto axis.

    All atoms of selection are rotated on the axis defined by axis_start and
    axis_end. The rotational matrix is then defined by the axis formed of
    atom_idx1 and atom_idx2 (it is rotated so it lies on the former axis).


    Parameters
    ----------
    atom_idx1 : int
        index of first atom

    atom_idx2 : int
        index of second atom

    axis_start : numpy-array
        starting point of the axis (atom_idx1 will be translated onto that point)

    axis_end : numpy-array
        ending point of the axis; start and end form the axis and therefore
        determine the rotation

    Returns
    -------
    rot_matrix : list {float float ...}
        flattened rotational matrix that rotates the axis defined by the two
        atoms onto the axis given by axis_start and axis_end

    """
    # get relevant coordinates
    atom_coords1 = vmd_coords_numpy_arrays(
        molid, frame_id, selection="index {}".format(atom_idx1))
    atom_coords2 = vmd_coords_numpy_arrays(
        molid, frame_id, selection="index {}".format(atom_idx2))

    # define both axis
    axis1 = atom_coords2 - atom_coords1
    axis1 /= np.linalg.norm(axis1)

    axis2 = axis_end - axis_start

    try:
        axis2 /= np.linalg.norm(axis2)
    except TypeError:
        # vector of length 1 already
        pass

    # angle alpha between axis1 and axis2 (so we can rotate about it)
    alpha = np.arccos(
        np.dot(axis1, axis2) / (np.linalg.norm(axis1) * np.linalg.norm(axis2)))

    # rotational axis is crossproduct of axis1 and axis2
    rotaxis = np.cross(axis1, axis2)

    # create rotational matrix
    matrix1 = cgt.rotation_matrix(alpha, rotaxis)
    return list(matrix1.flatten())
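
A hedged call sketch, assuming a molecule is already loaded in VMD under molid 0 and that the returned flat list is reshaped back into the 4x4 rotation matrix:

import numpy as np

# hypothetical usage inside a VMD session
axis_start = np.array([0.0, 0.0, 0.0])
axis_end = np.array([0.0, 0.0, 1.0])
flat_rot = get_rotational_matrix(molid=0, atom_idx1=0, atom_idx2=1,
                                 axis_start=axis_start, axis_end=axis_end)
rot_matrix = np.array(flat_rot).reshape(4, 4)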
Example #23
0
    output_name = "{}_{}".format(args.o, i)

    if args.restart:
        subdir = "CBZ_Dimer_anti_2H_flexible_scan_B97D3_cc-pVDZ_{}".format(i)
        dimeric_sys.oldchk = "CBZ_Dimer_anti_2H_flexible_scan_B97D3_cc-pVDZ_{}.chk".format(
            i)
    else:
        subdir = output_name

    os.mkdir(subdir)
    # calculate the current shift; multiply by -1 so the shift moves the
    # molecules towards each other (the x-coordinate is negative and we are
    # moving along the x-axis)
    if args.restart is False:
        current_shift = unit_vt_shift * (i * 0.1 * -1)
        cur_Tm = cgt.translation_matrix(current_shift)
        # translate the coordinates but let the coordinates from frame 0 stay
        # the same
        coords = dimeric_sys.mm_atm_coords(
            0, cur_Tm, True, *list(range(30, len(dimeric_sys.atoms))))
        dimeric_sys.ts_coords.append(coords)
    dimeric_sys.chk = "{}.chk".format(output_name)
    dimeric_sys.write_gau(
        "{}.gau".format(output_name),
        -1,
        True,
        title="CBZ anti-dimer h-bonds scan - interatomic distance: {}".format(
            i))

    #if args.restart is False:
    #    dimeric_sys.write_lmpdat("{}.lmpdat".format(output_name), -1,
Example #24
0
from ImageClient import ImPI
from SQLUtils import SQLUtils
import Transformations as trans
import pandas as pd

## Download the new images
images = ImPI().fetch_images_by_tag("cats", "/home/pi/c_drive/test_dl")
## Create raw and thumb files
image_out_data = pd.DataFrame()

for link in images:
    image_data = trans.expand_dir_and_transform(images[link], link)
    image_out_data = image_out_data.append(image_data, ignore_index=True)
    print("Finished processing {}".format(images[link]))
image_out_data.to_csv("sample_output.csv", index=False)
## Write data out to SQL
SQLUtils("novartis_dummy_db").write_dataframe_safe(image_out_data,
                                                   "pulled_images")
Example #25
0
def Ransac(Matches, TYPE, N, Epsilon):
    # Random sample consensus (RANSAC) is an iterative method to estimate the
    # parameters of a mathematical model from a set of observed data that
    # contains outliers. By iteratively fitting a model to random subsets of
    # the data, we keep the best model, judged by its ability to gather at
    # least a threshold number of inlier points.

    # Input:   Corresponding image coordinates in the two images (Matches)
    #          Threshold number of inlier points (N)
    #          Threshold value for the sum of squared differences (Epsilon)
    #          TYPE determines the model to compute (1=Affine, 2=Homography)
    # Outputs: The inlier points
    #          Affine or homography transformation matrix

    # Inlier count
    In_count = 0

    # Inlier array
    Inlier_Data = []

    while (In_count < N):
        # Randomly pick out points from the matches
        r = np.round((len(Matches) - 1) * np.random.random(int(np.round(len(Matches) / 2)) + 1)).astype(int)
        Subsample = Matches[r, :]

        # Keep track of the inliers
        if (In_count > 0):
            Subsample = np.concatenate((Subsample, np.asarray(Inlier_Data)), axis=0)

        # Isolate the matches
        Image_Coords_1 = np.asarray(Subsample)[:,0:2]
        Image_Coords_2 = np.asarray(Subsample)[:,2:4]

        # Homography
        if (TYPE == 2):

            # Compute the homography matrix
            H = Transformations.Homography(Image_Coords_1, Image_Coords_2)

            # Compute the transformation and the inverse transformation
            X2 = Transformations.HomographyTransformer(H, Matches[:, 0:2])
            X1 = Transformations.HomographyTransformer(np.linalg.inv(H), Matches[:, 2:4])

            # Compute the euclidean distance
            ssd = SumEucDistance(X1, Matches[:, 0:2], X2, Matches[:, 2:4])

        # Affine
        elif (TYPE == 1):

            # Compute the affine matrix
            A = Transformations.AffineTransformer(Image_Coords_1, Image_Coords_2)

            # Compute the transformation coordinates and inverse transformation coordinates
            X2 = np.matmul(A, np.transpose(np.hstack((np.asarray(Matches[:,0:2]), np.ones(shape=(len(Matches),1))))))
            X1 = np.matmul(np.linalg.inv(A), np.transpose(np.hstack((np.asarray(Matches[:,2:4]), np.ones(shape=(len(Matches),1))))))

            # Compute the euclidean distance
            ssd = SumEucDistance(X1[0:2].T, Matches[:, 0:2], X2[0:2].T, Matches[:, 2:4])

        else:
            raise ValueError("Incorrect Type")

        # Extract the outliers
        for i in range(0, len(ssd)):
            if (ssd[i] < Epsilon):
                if [Matches[i,0],Matches[i,1],Matches[i,2],Matches[i,3]] not in np.asarray(Inlier_Data).tolist():
                    Inlier_Data.append(Matches[i,:])
                    In_count += 1
                    print(In_count)

    # Compute the transformation using the inliers
    Inlier_Data = np.asarray(Inlier_Data, dtype=float)

    if (TYPE == 1):
        return Inlier_Data, Transformations.AffineTransformer(Inlier_Data[:, 0:2], Inlier_Data[:, 2:4])
    elif (TYPE == 2):
        return Inlier_Data, Transformations.Homography(Inlier_Data[:, 0:2], Inlier_Data[:, 2:4])
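
A hedged call sketch, reusing the [x1, y1, x2, y2] matches layout that the script in Example #31 loads; the thresholds N and Epsilon below are assumed values:

import numpy as np

# hypothetical usage with assumed threshold values
matches = np.loadtxt('library_matches.txt')  # rows: x1 y1 x2 y2
inliers, H = Ransac(matches, TYPE=2, N=300, Epsilon=1.0)
print("homography estimated from {} inliers".format(len(inliers)))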
Example #26
0
    def fill_up(self,
                num_bins,
                iterations=10,
                fill_up_plots=False,
                point_plots=False,
                RO=True,
                t=1):

        # consider every label separately
        label_confidence = []
        for label in self.D.labels:
            label_idx = self.D.labels.index(label)
            '''collect training data'''
            data = self.D.X_b_train[self.D.Y_b_train == label]
            '''remove outliers, rotate data'''
            if RO:
                data = Transformations.remove_outliers_lof(data)
            trafo = self.trafo()
            data = trafo.transform(data)

            cdfs_scaled = np.empty((len(data[0]), num_bins))
            fitted_cdf = np.empty((len(data[0]), num_bins))
            fitted_ = np.empty((len(data[0]), num_bins))
            num_fill_up = 0
            data_range = []

            DE_list = []

            if fill_up_plots:
                f, ax = plt.subplots(nrows=1,
                                     ncols=len(data[0]),
                                     figsize=(6, 2.5))

            # consider every dimension
            for line in range(len(data[0])):
                '''project onto line, determine borders'''
                d = data[:, line]
                d_min = min(d)
                d_max = max(d)
                data_range.append([d_min, d_max])
                '''define Density Estimator here!'''
                DE_list.append(self.DE(num_bins))
                DE_list[line].estimate(d, d_min, d_max)
                '''estimate distribution'''
                fitted = self.density_func.fit(DE_list[line].mids,
                                               DE_list[line].values, d)
                fitted_[line] = copy.deepcopy(fitted)
                fitted_cdf[line] = np.cumsum(fitted)
                fitted_cdf[line] = fitted_cdf[line] / fitted_cdf[line][-1]
                '''to be filled up: the differences between the distribution curve and the histogram'''
                diff = fitted - DE_list[line].values
                '''number of points to add'''
                num_points_line = (len(d) /
                                   sum(DE_list[line].values)) * sum(diff)
                num_fill_up = max(num_fill_up, num_points_line)
                '''probability distribution for the fill-up'''
                if sum(diff) == 0:
                    cdfs_scaled[line] = [0] * num_bins
                else:
                    diff = diff / sum(diff)
                    diff = [max(diff[i], 0) for i in range(len(diff))]
                    cdfs_scaled[line] = np.cumsum(diff)
                    cdfs_scaled[line] = (
                        cdfs_scaled[line] /
                        cdfs_scaled[line][-1]) * num_points_line

                if fill_up_plots:
                    barWidth = DE_list[line].mids[1] - DE_list[line].mids[0]
                    fill = fitted_[line] - DE_list[line].values
                    ax[line].bar(DE_list[line].mids,
                                 DE_list[line].values,
                                 label='data',
                                 color='teal',
                                 width=barWidth)
                    ax[line].bar(DE_list[line].mids,
                                 [max(fill[i], 0) for i in range(len(fill))],
                                 bottom=DE_list[line].values,
                                 label='fill up',
                                 color='goldenrod',
                                 width=barWidth,
                                 hatch="...",
                                 edgecolor="white")
                    ax[line].plot(DE_list[line].mids,
                                  fitted_[line],
                                  label='fitted',
                                  c='mediumvioletred',
                                  linewidth=2)
                    ax[line].get_xaxis().set_ticks([])
                    ax[line].get_yaxis().set_ticks([])

            if fill_up_plots:
                ax[-1].legend()
                plt.show()
                # f.savefig('Results/Example_cluster_distr.pdf', format='pdf', dpi=1200, bbox_inches='tight')

            # determine the number of added points in total: max over dimensions
            num_fill_up = int(num_fill_up)
            if num_fill_up == 0:
                label_confidence.append(0)
                continue

            # best out of 10: go for the result with the highest confidence
            best_conf = 0
            leftover_points = []
            # kNN_rnd_dist, kNN_rnd_std = confidence_kNN_rnd_coeff(data_range, num_fill_up)
            kNN_rnd_dist, kNN_rnd_std = Confidence.confidence_kNN_train_sized_coeff(
                data, num_fill_up)
            for it in range(iterations):
                points = np.empty((num_fill_up, 0))

                # generate points
                for line in range(len(data[0])):
                    '''adjust cdf (in case there have to be more points added because of other lines)'''
                    distr_scaled = fitted_cdf[line] * max(
                        (num_fill_up - cdfs_scaled[line][-1]), 0)
                    cdf = cdfs_scaled[line] + distr_scaled
                    cdf = cdf / cdf[-1]  # normalize
                    '''generate random values according to the cdf'''
                    values = np.random.rand(num_fill_up)
                    value_bins = np.searchsorted(cdf, values)
                    coords = np.array([
                        random.uniform(DE_list[line].grid[value_bins[i]],
                                       DE_list[line].grid[value_bins[i] + 1])
                        for i in range(num_fill_up)
                    ]).reshape(num_fill_up, 1)
                    points = np.concatenate((points, coords), axis=1)
                '''compute the confidence of the result'''
                if len(points) < 20:
                    conf_b, conf_a, l_p = (0, 0, [[]])
                else:
                    conf_b, conf_a, l_p = Confidence.confidence_kNN_rnd(
                        points, kNN_rnd_dist, t * kNN_rnd_std)

                # add the points to the data set
                if conf_a > best_conf:
                    best_conf = conf_a
                    leftover_points = copy.deepcopy(l_p)
                    # leftover_points = points

                if point_plots:
                    plt.figure(it)
                    plt.scatter(data[:, 0],
                                data[:, 1],
                                c=self.colors[label_idx],
                                alpha=0.2,
                                s=3)
                    plt.scatter(points[:, 0],
                                points[:, 1],
                                c='red',
                                alpha=0.8,
                                s=8)
                    if len(l_p) > 0 and len(l_p[0]) > 0:
                        plt.scatter(l_p[:, 0],
                                    l_p[:, 1],
                                    c=self.colors[label_idx],
                                    alpha=0.8,
                                    s=8)
                    plt.show()
                    if len(data[0]) > 2:
                        plt.figure(it * 100)
                        plt.scatter(data[:, 0],
                                    data[:, 2],
                                    c=self.colors[label_idx],
                                    alpha=0.2,
                                    s=3)
                        plt.scatter(points[:, 0],
                                    points[:, 2],
                                    c='red',
                                    alpha=0.8,
                                    s=8)
                        if len(l_p) > 0 and len(l_p[0]) > 0:
                            plt.scatter(l_p[:, 0],
                                        l_p[:, 2],
                                        c=self.colors[label_idx],
                                        alpha=0.8,
                                        s=8)
                        plt.show()
            '''remove the points with low confidence, discard the result entirely 
               if the confidence is too low. Transform back the leftover points'''
            if len(leftover_points) > 0:  # and 1 / best_conf <= kNN_rnd_dist + t * kNN_rnd_std:
                add_me = trafo.transform_back(leftover_points)
                self.added_points = np.concatenate((self.added_points, add_me))
                self.added_labels = np.append(self.added_labels,
                                              [label] * len(add_me))

            label_confidence.append(best_conf)
        if point_plots:
            plt.show()

        return label_confidence
Example #27
0
args = parser.parse_args()

cbz = agum.Unification()
cbz.read_lmpdat(args.lmpdat)
cbz.import_dcd(args.dcd)
cbz.read_frames(frame=args.start)

with open(args.out, "w") as f_out:
    f_out.write("{:>9}{:>17}{:>17}{:>17}{:>17}\n".format(
        "Step", "ang_C13_N8_C5", "ang_C13_N8_C9", "ang_C9_N8_C5",
        "ang_b1_N8_b2"))

    for n, frame in enumerate(cbz.ts_coords[args.start:]):

        # omega-1/-2/-3 - angles between carbon and nitrogen
        ang_C13_N8_C5 = cgt.angle_between_vectors(frame[24] - frame[15],
                                                  frame[24] - frame[7])
        ang_C13_N8_C9 = cgt.angle_between_vectors(frame[24] - frame[15],
                                                  frame[24] - frame[25])
        ang_C9_N8_C5 = cgt.angle_between_vectors(frame[24] - frame[25],
                                                 frame[24] - frame[7])

        # phi - angle between benzenes' center of masses
        com_b1 = agg.get_com(
            [frame[14], frame[16], frame[18], frame[20], frame[22], frame[15]],
            [
                cbz.atm_types[cbz.atoms[14].atm_key].weigh,
                cbz.atm_types[cbz.atoms[16].atm_key].weigh,
                cbz.atm_types[cbz.atoms[18].atm_key].weigh,
                cbz.atm_types[cbz.atoms[20].atm_key].weigh,
                cbz.atm_types[cbz.atoms[22].atm_key].weigh,
                cbz.atm_types[cbz.atoms[15].atm_key].weigh
Example #28
0
        batch_size=args.batch_size)


def TrainData():
    return images_train, labels_train


pre_transform_images, labels = tf.case({
    tf.equal(data2use, 'stats'): StatsData,
    tf.equal(data2use, 'train'): TrainData,
    tf.equal(data2use, 'val'): ValData
})

# Applying our transformations to the images (and handling their corresponding labels as well):
# Transformations should be applied to the raw images, before any standardization, whitening, etc.
transformer = Transformations.Transformer(transformations=TRANSFORMATIONS_LIST)
images, labels = transformer.TransformImages_TF_OP(pre_transform_images,
                                                   labels)

# The example classifier was trained on standardized images, so standardization is applied AFTER the transformations:
post_processed_images = tf.map_fn(
    lambda im: tf.image.per_image_standardization(im), images)

train_batches_per_epoch = int(
    np.ceil(num_of_samples * args.train_portion / args.batch_size))
val_batches_per_epoch = int(
    np.ceil(num_of_samples * (1 - args.train_portion) / args.batch_size))

# Example classifier model:
classifier = cifar10.inference(post_processed_images)
logits = classifier.inference_logits()
Example #29
0
            if target_turn > control_turn:
                control_turn = min( target_turn, control_turn + 0.1 )
            elif target_turn < control_turn:
                control_turn = max( target_turn, control_turn - 0.1 )
            else:
                control_turn = target_turn

            twist = TwistStamped()

            for i in range(6):
                dXnp[i] = dX[i]


            if changeframe != 1:  # change the frame of the velocity
                R = my.quaternion_matrix([rot[3], rot[0], rot[1], rot[2]])
                A = np.concatenate((R[0:3][:, 0:3], np.zeros(shape=(3, 3))), 1)
                B = np.concatenate((np.zeros(shape=(3, 3)), R[0:3][:, 0:3]), 1)
                Screw = np.vstack((A, B))
                dXnp = np.dot(Screw, dXnp)

            twist.twist.linear.x = control_speed * dXnp[0] * direction
            twist.twist.linear.y = control_speed * dXnp[1] * direction
            twist.twist.linear.z = control_speed * dXnp[2] * direction
            twist.twist.angular.x = control_turn * dXnp[3] * direction
            twist.twist.angular.y = control_turn * dXnp[4] * direction
            twist.twist.angular.z = control_turn * dXnp[5] * direction
            
            pub.publish(twist)

            #print("loop: {0}".format(count))
Example #30
0
 def myfun_cutout(x):
     transformed_image = Transformations.cutout(x, level)
     return transformed_image
Example #31
0
import Ransac
import Transformations
import numpy as np
import matplotlib.pyplot as plt

I1 = plt.imread('library1.jpg')
I2 = plt.imread('library2.jpg')

matches = np.loadtxt('library_matches.txt')

Input_coord1 = matches[:,0:2]
Input_coord2 = matches[:,2:4]

### LS Affine transformation ###
Affine_T = Transformations.AffineTransformer(Input_coord1, Input_coord2)

# Generate a pseudo z-axis
PS_Input_coord1 = np.hstack((Input_coord1, np.ones(shape=(len(Input_coord1), 1))))
PS_Input_coord2 = np.hstack((Input_coord2, np.ones(shape=(len(Input_coord2), 1))))

# Transformed coordinates
Tr_coord1 = np.matmul(np.linalg.inv(Affine_T), np.transpose(PS_Input_coord2))
Tr_coord2 = np.matmul(Affine_T, np.transpose(PS_Input_coord1))

# Residuals and mean error
Residuals_IC1 = Tr_coord2.T - PS_Input_coord2
Residuals_IC2 = Tr_coord1.T - PS_Input_coord1

Mean_Res_IC1 = np.mean(Residuals_IC1)
Mean_Res_IC2 = np.mean(Residuals_IC2)
Example #32
-2
def main():

    img = cv2.imread('Lenna.jpg', 3)
    img = tr.RGB2YCBCR(img)
    Y, Cr, Cb = cv2.split(img)
    print(Y.shape)

    pathlib.Path('./FirstAttempt').mkdir(parents=True, exist_ok=True)
    cv2.imwrite("./FirstAttempt/OldY.jpg", Y)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    (Sl, (Sh, Sv, Sd)) = pywt.dwt2(Y, 'haar')

    ReducedCb = downscale_local_mean(Cb, (2, 2))
    ReducedCr = downscale_local_mean(Cr, (2, 2))

    # Sh = ReducedCb
    # Sv = ReducedCr

    NewY = pywt.idwt2((Sl, (ReducedCb, ReducedCr, Sd)), 'haar')

    cv2.imwrite("./FirstAttempt/NewYFirstTry.jpg", NewY)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    h = hf.Halftone('./FirstAttempt/NewYFirstTry.jpg')
    h.make(angles=[0, 15, 30, 45],
           antialias=True,
           percentage=10,
           sample=1,
           scale=2,
           style='grayscale')