Example #1
    def fieldarray(self):
        # Prepare the commands to run batches of simulations
        k = 0

        T = [0.0] * max_t
        TimeS = [0.0] * max_t
        s = stat(dirc + '/' + output_name + '.stat')  # parse the stat file once
        for t in range(1, max_t):
            p = s["ElapsedWallTime"]["value"][-1 - t]
            p2 = s["ElapsedWallTime"]["value"][-1 - (t - 1)]
            # ... also the total elapsed time
            TimeS[t - 1] = s["ElapsedTime"]["value"][-1 - (t - 1)]
            # ... and the number of elements
            aux = s["CoordinateMesh"]["elements"][-1 - (t - 1)]

            t_deltat = abs(p2 - p)
            T[t - 1] = t_deltat
            Eles = aux  # number of elements at this timestep

        for n, i in enumerate(wall_times_max):
            if i == testy[k]:
                print('n:', n, 'and', 'i:', i)
                wall_times_max[n] = np.amax(T)
                wall_times_min[n] = np.amin(T)
                wall_times_avg[n] = np.mean(T)
                TotalT[n] = np.amax(TimeS)
                TotalCores[n] = Eles / cores_list[n]
                k += 1
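
A more compact sketch of the same per-timestep delta computation using numpy.diff; this assumes, as above, that dirc, output_name and max_t are defined and that stat_parser returns numpy arrays:

import numpy as np
from fluidity_tools import stat_parser as stat

s = stat(dirc + '/' + output_name + '.stat')  # parse the stat file once
wall = s["ElapsedWallTime"]["value"]

# Per-timestep wall-clock deltas over the last max_t entries;
# the same quantities as the abs(p2 - p) loop above.
T = np.abs(np.diff(wall[-max_t:]))
print(np.amax(T), np.amin(T), np.mean(T))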
Example #2
def get_convergence(statfileA, statfileB, field):
    dt_A = stat(statfileA)["ElapsedTime"]['value'][1] - stat(
        statfileA)["ElapsedTime"]['value'][0]
    dt_B = stat(statfileB)["ElapsedTime"]['value'][1] - stat(
        statfileB)["ElapsedTime"]['value'][0]

    a_error_l1 = sum(stat(statfileA)["Fluid"][field]["integral"]) * dt_A
    b_error_l1 = sum(stat(statfileB)["Fluid"][field]["integral"]) * dt_B

    a_error_l2 = sum(
        [x**2 * dt_A for x in stat(statfileA)["Fluid"][field]["l2norm"]])**0.5
    b_error_l2 = sum(
        [x**2 * dt_B for x in stat(statfileB)["Fluid"][field]["l2norm"]])**0.5

    a_error_inf = max(stat(statfileA)["Fluid"][field]["max"])
    b_error_inf = max(stat(statfileB)["Fluid"][field]["max"])

    # Velocity error calculation
    ab_ratio_l1 = a_error_l1 / b_error_l1
    ab_ratio_l2 = a_error_l2 / b_error_l2
    ab_ratio_inf = a_error_inf / b_error_inf

    #ab_error = [log(ab_ratio_l1, 2), log(ab_ratio_l2, 2), log(ab_ratio_inf, 2)]
    # Compute only the convergence using the max norm until ShallowWater.F90's output is fixed
    ab_error = [1000, 1000, log(ab_ratio_inf, 2)]
    return ab_error
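
A hypothetical call with placeholder stat-file names; the last entry of the returned list is the observed inf-norm convergence order, while the first two are the 1000 placeholders noted above:

orders = get_convergence("run_A.stat", "run_B.stat", "FreeSurfaceError")
print("inf-norm order:", orders[2])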
Example #3
def pressure_convergence(file_coarse, file_fine):
    ######## Pressure Errors: #######
    anal = stat(file_fine)['Fields']['AnalyticalPressure']['l2norm'][-1]
    error_coarse = stat(
        file_coarse)['Fields']['PressureError']['l2norm'][-1] / anal
    error_fine = stat(
        file_fine)['Fields']['PressureError']['l2norm'][-1] / anal
    pressure_convergence = np.log2(error_coarse / error_fine)
    return pressure_convergence
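
The log2 of the error ratio is the observed order of convergence when the fine run halves the mesh spacing: if the error behaves like e ~ C*h**p, then e_coarse/e_fine = 2**p and p = log2(e_coarse/e_fine). A quick numeric check with hypothetical errors:

import numpy as np

e_coarse, e_fine = 4.0e-2, 1.0e-2   # hypothetical errors at spacing h and h/2
print(np.log2(e_coarse / e_fine))   # -> 2.0, i.e. second-order convergence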
Example #4
def main(argv=None):

        a_0 = settings.a0  # initial maximum perturbation
        g = settings.g  # gravity
        eta = settings.eta  # viscosity
        L = settings.L  # wavelength
        timestep = settings.timestep  # timestep
        filename = ''

        global debug
        debug = False
        #debug = True

        try:
                opts, args = getopt.getopt(sys.argv[1:], "h", ['help', 'file='])
        except getopt.GetoptError:
                usage()
                sys.exit(2)
        for opt, arg in opts:
                if opt == '--file':
                    filename = arg
                elif opt == '-h' or opt == '--help':
                    usage()
                    sys.exit(2)
        if filename == '':
                usage()
                sys.exit(2)

        print('Using:\n\ta_0 =', a_0)  # initial maximum perturbation
        print('\tg =', g)  # gravity
        print('\teta =', eta)  # viscosity
        print('\tL =', L)  # wavelength
        print('\ttimestep =', timestep)  # timestep

        ####################### Print time plot  ###########################
        print('Generating time plot')

        x_time = stat(filename)["ElapsedTime"]["value"]
        fs_simu = stat(filename)["water"]["FreeSurface"]["left"]
#        fs_simu = stat(filename)["water"]["FreeSurface"]["middle"]
        fs_ana = stat(filename)["water"]["FreeSurface_Analytical"]["left"]
#        fs_ana = stat(filename)["water"]["FreeSurface_Analytical"]["middle"]

        plt.ion()  # switch on interactive mode
        fig = plt.figure()
        ax = fig.add_subplot(111)

        ax.plot(x_time, fs_simu, 'ro')
        ax.plot(x_time, fs_ana, '-')
        plt.title('Free Surface timeplot at x=0')
        plt.xlabel('Time [s]')
        plt.ylabel('Free surface [m]')

        plt.draw()
        input("Please press Enter")
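
The getopt block above could also be written with argparse; a minimal sketch of the same command-line interface (a required --file option), not part of the original script:

import argparse

parser = argparse.ArgumentParser(description="Plot the free surface over time")
parser.add_argument('--file', required=True, help="stat file to read")
args = parser.parse_args()
filename = args.file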
Example #6
def velocity_convergence(file_coarse, file_fine):
    ######## Velocity Errors: #######
    anal = stat(
        file_fine)['Fields']['AnalyticalVelocity%magnitude']['l2norm'][-1]
    error_coarse = stat(
        file_coarse)['Fields']['VelocityError%magnitude']['l2norm'][-1] / anal
    error_fine = stat(
        file_fine)['Fields']['VelocityError%magnitude']['l2norm'][-1] / anal
    velocity_convergence = np.log2(error_coarse / error_fine)
    return velocity_convergence
Example #7
def CMB_velocity_convergence(file_coarse, file_fine):
    ######## CMB Velocity Errors: #######
    anal = stat(
        file_fine)['Fields']['AnalyticalVelocity']['surface_l2norm%Bottom'][-1]
    error_coarse = stat(file_coarse)['Fields']['VelocityError'][
        'surface_l2norm%Bottom'][-1] / anal
    error_fine = stat(file_fine)['Fields']['VelocityError'][
        'surface_l2norm%Bottom'][-1] / anal
    CMB_velocity_convergence = np.log2(error_coarse / error_fine)
    return CMB_velocity_convergence
Example #8
def surface_velocity_convergence(file_coarse, file_fine):
    ######## Surface Velocity Errors: #######
    anal = stat(
        file_fine)['Fields']['AnalyticalVelocity']['surface_l2norm%Top'][-1]
    error_coarse = stat(file_coarse)['Fields']['VelocityError'][
        'surface_l2norm%Top'][-1] / anal
    error_fine = stat(
        file_fine)['Fields']['VelocityError']['surface_l2norm%Top'][-1] / anal
    surface_velocity_convergence = np.log2(error_coarse / error_fine)
    return surface_velocity_convergence
Example #9
def normalstress_convergence(file_coarse, file_fine):
    ######## Normalstress Errors: #######
    anal = stat(file_fine)['Fields']['AnalyticalNormalStress']['l2norm'][-1]
    error_coarse = stat(
        file_coarse)['Fields']['NormalStressError']['l2norm'][-1] / anal
    error_fine = stat(
        file_fine)['Fields']['NormalStressError']['l2norm'][-1] / anal
    normalstress_convergence = np.log2(error_coarse / error_fine)
    return normalstress_convergence
Example #10
def CMB_normalstress_convergence(file_coarse, file_fine):
    ######## CMB Normalstress Errors: #######
    anal = stat(file_fine)['Fields']['AnalyticalNormalStress'][
        'surface_l2norm%Bottom'][-1]
    error_coarse = stat(file_coarse)['Fields']['NormalStressError'][
        'surface_l2norm%Bottom'][-1] / anal
    error_fine = stat(file_fine)['Fields']['NormalStressError'][
        'surface_l2norm%Bottom'][-1] / anal
    CMB_normalstress_convergence = np.log2(error_coarse / error_fine)
    return CMB_normalstress_convergence
Example #11
def report_convergence(file1, file2):
  print(file1, "->", file2)
  
  stat1 = stat(file1)
  stat2 = stat(file2)

  print(stat1["dt"]["value"][0], "->", stat2["dt"]["value"][0])
  
  errortop_l2_1 = sqrt(sum(stat1["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][1:]*stat1["dt"]["value"][1:]))
  errortop_l2_2 = sqrt(sum(stat2["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][1:]*stat2["dt"]["value"][1:]))
  convergencetop_l2 = log((errortop_l2_1/errortop_l2_2), 2)

  print('  convergencetop_l2 = ', convergencetop_l2)
  print('    errortop_l2_1 = ', errortop_l2_1)
  print('    errortop_l2_2 = ', errortop_l2_2)
  
  errorbottom_l2_1 = sqrt(sum(stat1["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][1:]*stat1["dt"]["value"][1:]))
  errorbottom_l2_2 = sqrt(sum(stat2["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][1:]*stat2["dt"]["value"][1:]))
  convergencebottom_l2 = log((errorbottom_l2_1/errorbottom_l2_2), 2)

  print('  convergencebottom_l2 = ', convergencebottom_l2)
  print('    errorbottom_l2_1 = ', errorbottom_l2_1)
  print('    errorbottom_l2_2 = ', errorbottom_l2_2)
  
  error_l2_1 = sqrt(sum(stat1["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][1:]*stat1["dt"]["value"][1:]))
  error_l2_2 = sqrt(sum(stat2["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][1:]*stat2["dt"]["value"][1:]))
  convergence_l2 = log((error_l2_1/error_l2_2), 2)

  print('  convergence_l2 = ', convergence_l2)
  print('    error_l2_1 = ', error_l2_1)
  print('    error_l2_2 = ', error_l2_2)
  
  error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"].max()
  error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"].max()
  convergence_linf = log((error_linf_1/error_linf_2), 2)

  print('  convergence_linf = ', convergence_linf)
  print('    error_linf_1 = ', error_linf_1)
  print('    error_linf_2 = ', error_linf_2)

  quad1 = quad(lambda t: solution.nond_error_amp(stat1, t)**2, stat1["ElapsedTime"]["value"][0], stat1["ElapsedTime"]["value"][-1], limit=1000)
  quad2 = quad(lambda t: solution.nond_error_amp(stat2, t)**2, stat2["ElapsedTime"]["value"][0], stat2["ElapsedTime"]["value"][-1], limit=1000)
  errormaxfs_l2_1 = sqrt(quad1[0])
  errormaxfs_l2_2 = sqrt(quad2[0])
  convergencemaxfs_l2 = log((errormaxfs_l2_1/errormaxfs_l2_2), 2)

  print('  convergencemaxfs_l2 = ', convergencemaxfs_l2)
  print('    errormaxfs_l2_1 = ', errormaxfs_l2_1, '(', quad1[1], ')')
  print('    errormaxfs_l2_2 = ', errormaxfs_l2_2, '(', quad2[1], ')')

  return [convergencetop_l2, convergencebottom_l2, convergence_linf]
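
The sqrt(sum(... * dt)) expressions above are discrete space-time L2 norms: with e_k the surface integral of the squared difference at step k, the error is approximately sqrt(sum_k e_k * dt_k). A minimal numeric sketch with made-up values:

import numpy as np

e = np.array([0.0, 4.0, 1.0])    # hypothetical squared-difference integrals
dt = np.array([0.0, 0.1, 0.1])   # per-step timestep sizes
print(np.sqrt(np.sum(e[1:] * dt[1:])))  # -> ~0.707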
Example #12
    def get_run_stats(self,
                      dir,
                      log_name,
                      cores=None,
                      solver_t=False,
                      riemman_t=False):
        """
            Extracts the Load balancer, solver and CDOFs
            out of the stat files, crawling directories following the name
            coreXXX/rad_radiant_noio.e

            If multiple group sets exist in the file, then all of them will be 
            read, using the format of XXX_gset_G, where X is the cpu and G the
            group set number
        """
        # Store the directory before you start
        root_dir = os.getcwd()
        STAT_F = f'{log_name}.Neutron.output_quantities.stat'

        # If we don't have multiple cores to loop through just set the core
        # count to 1
        no_cores_dir = False
        if cores is None:
            no_cores_dir = True
            cores = [1]

        for cpu in cores:
            # If we are crawling through multiple core directories
            # Change to that core dir
            if no_cores_dir:
                os.chdir(f'{dir}')
            else:
                os.chdir(f'{dir}/core{cpu}')

            self.LB_T[cpu] = stat(STAT_F)['RadiantLoadBalanceTime']['Value']
            self.WALL_T[cpu] = stat(STAT_F)['ElapsedWallTime']['Value']

            if solver_t:
                self.SOLVER_T[cpu] = stat(STAT_F)['RadiantSolveTime']['Value']

            if riemman_t:
                self.RIEMMAN_T[cpu] = stat(
                    STAT_F)['RadiantCalcRiemmanMatsTime']['Value']

            self.get_average_dofs(STAT_F, cpu)

        # Change back to the initial directory
        os.chdir(root_dir)

        return self.WALL_T, self.LB_T, self.CDOFS
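
A hypothetical driver for the method above, assuming it lives on a class (here called RunStats) whose WALL_T, LB_T and CDOFS dictionaries are initialised in __init__:

rs = RunStats()
wall, lb, cdofs = rs.get_run_stats('hpc_results', 'rad_radiant_noio',
                                   cores=[1, 2, 4, 8])
for cpu in [1, 2, 4, 8]:
    print(cpu, 'cores:', wall[cpu][-1], 's wall time')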
Example #13
def report_convergence(file1, file2):
    print(file1, "->", file2)

    stat1 = stat(file1)
    stat2 = stat(file2)

    errortop_l2_1 = stat1["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Top"][-1]
    errortop_l2_2 = stat2["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Top"][-1]
    convergencetop_l2 = log((errortop_l2_1 / errortop_l2_2), 2)

    print('  convergencetop_l2 = ', convergencetop_l2)
    print('    errortop_l2_1 = ', errortop_l2_1)
    print('    errortop_l2_2 = ', errortop_l2_2)

    errorbottom_l2_1 = stat1["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Bottom"][-1]
    errorbottom_l2_2 = stat2["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Bottom"][-1]
    convergencebottom_l2 = log((errorbottom_l2_1 / errorbottom_l2_2), 2)

    print('  convergencebottom_l2 = ', convergencebottom_l2)
    print('    errorbottom_l2_1 = ', errorbottom_l2_1)
    print('    errorbottom_l2_2 = ', errorbottom_l2_2)

    error_l2_1 = stat1["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Both"][-1]
    error_l2_2 = stat2["Fluid"]["FreeSurfaceDifference"][
        "surface_l2norm%Both"][-1]
    convergence_l2 = log((error_l2_1 / error_l2_2), 2)

    print('  convergence_l2 = ', convergence_l2)
    print('    error_l2_1 = ', error_l2_1)
    print('    error_l2_2 = ', error_l2_2)

    error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"][-1]
    error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"][-1]
    convergence_linf = log((error_linf_1 / error_linf_2), 2)

    print('  convergence_linf = ', convergence_linf)
    print('    error_linf_1 = ', error_linf_1)
    print('    error_linf_2 = ', error_linf_2)

    return [convergencetop_l2, convergencebottom_l2, convergence_linf]
Example #14
def report_convergence(file1, file2):
    print(file1, "->", file2)

    stat1 = stat(file1)
    stat2 = stat(file2)

    errortop_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]
                         ["surface_integral%TopSurfaceL2Norm"][-1])
    errortop_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]
                         ["surface_integral%TopSurfaceL2Norm"][-1])
    convergencetop_l2 = log((errortop_l2_1 / errortop_l2_2), 2)

    print('  convergencetop_l2 = ', convergencetop_l2)
    print('    errortop_l2_1 = ', errortop_l2_1)
    print('    errortop_l2_2 = ', errortop_l2_2)

    errorbottom_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]
                            ["surface_integral%BottomSurfaceL2Norm"][-1])
    errorbottom_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]
                            ["surface_integral%BottomSurfaceL2Norm"][-1])
    convergencebottom_l2 = log((errorbottom_l2_1 / errorbottom_l2_2), 2)

    print('  convergencebottom_l2 = ', convergencebottom_l2)
    print('    errorbottom_l2_1 = ', errorbottom_l2_1)
    print('    errorbottom_l2_2 = ', errorbottom_l2_2)

    error_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]
                      ["surface_integral%SurfaceL2Norm"][-1])
    error_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]
                      ["surface_integral%SurfaceL2Norm"][-1])
    convergence_l2 = log((error_l2_1 / error_l2_2), 2)

    print('  convergence_l2 = ', convergence_l2)
    print('    error_l2_1 = ', error_l2_1)
    print('    error_l2_2 = ', error_l2_2)

    error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"][-1]
    error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"][-1]
    convergence_linf = log((error_linf_1 / error_linf_2), 2)

    print('  convergence_linf = ', convergence_linf)
    print('    error_linf_1 = ', error_linf_1)
    print('    error_linf_2 = ', error_linf_2)

    return [convergencetop_l2, convergencebottom_l2, convergence_linf]
Example #15
    def get_run_stats(self, dir, log_name, cores):
        """
            Extracts the Load balancer, solver and CDOFs
            out of the stat files, crawling directories following the name
            coreXXX/rad_radiant_noio.e
        """
        # Store the directory before you start
        root_dir = os.getcwd()
        STAT_F = f'{log_name}.Neutron.output_quantities.stat'
        # Use the Fluidity module to load the stat files into arrays
        for cpu in cores:
            os.chdir(f'{dir}/core{cpu}')
            self.LB_T[cpu] = stat(STAT_F)['RadiantLoadBalanceTime']['Value']
            self.WALL_T[cpu] = stat(STAT_F)['ElapsedWallTime']['Value']
            self.CDOFS[cpu] = stat(STAT_F)['ContinuousDOF_per_group']['Value']

        # Change back to the initial directory
        os.chdir(root_dir)

        return self.WALL_T, self.LB_T, self.CDOFS
Example #16
def get_convergence(statfileA, statfileB, field):
  dt_A = stat(statfileA)["ElapsedTime"]['value'][1] - stat(statfileA)["ElapsedTime"]['value'][0]
  dt_B = stat(statfileB)["ElapsedTime"]['value'][1] - stat(statfileB)["ElapsedTime"]['value'][0]

  a_error_l1 = sum(stat(statfileA)["Fluid"][field]["integral"])*dt_A
  b_error_l1 = sum(stat(statfileB)["Fluid"][field]["integral"])*dt_B

  a_error_l2 = sum([x**2*dt_A for x in stat(statfileA)["Fluid"][field]["l2norm"]])**0.5
  b_error_l2 = sum([x**2*dt_B for x in stat(statfileB)["Fluid"][field]["l2norm"]])**0.5

  a_error_inf = max(stat(statfileA)["Fluid"][field]["max"])
  b_error_inf = max(stat(statfileB)["Fluid"][field]["max"])

  # Velocity error calculation
  ab_ratio_l1 = a_error_l1 / b_error_l1
  ab_ratio_l2 = a_error_l2 / b_error_l2
  ab_ratio_inf = a_error_inf / b_error_inf

  ab_error = [log(ab_ratio_l1, 2), log(ab_ratio_l2, 2), log(ab_ratio_inf, 2)]
  return ab_error
Example #17
def get_walltime(foldername, cwd):
    from fluidity_tools import stat_parser as stat
    # Change into the folder
    os.chdir(foldername)
    output_name = libspud.get_option('/simulation_name')
    walltime = stat('./' + output_name +
                    '.stat')["ElapsedWallTime"]["value"][-1]
    # Return to the original path
    os.chdir(cwd)
    return walltime
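
A hypothetical call, assuming libspud has already loaded the options file that names the simulation and that folder_1 contains the run:

wt = get_walltime('folder_1', os.getcwd())
print('final wall time:', wt, 's')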
Example #19
def get_convergence(statfileA, statfileB, field):
  dt_A = stat(statfileA)["ElapsedTime"]['value'][1] - stat(statfileA)["ElapsedTime"]['value'][0]
  dt_B = stat(statfileB)["ElapsedTime"]['value'][1] - stat(statfileB)["ElapsedTime"]['value'][0]
  
  a_error_l1 = sum(stat(statfileA)["Fluid"][field]["integral"])*dt_A
  b_error_l1 = sum(stat(statfileB)["Fluid"][field]["integral"])*dt_B

  a_error_l2 = sum([x**2*dt_A for x in stat(statfileA)["Fluid"][field]["l2norm"]])**0.5
  b_error_l2 = sum([x**2*dt_B for x in stat(statfileB)["Fluid"][field]["l2norm"]])**0.5

  a_error_inf = max(stat(statfileA)["Fluid"][field]["max"])
  b_error_inf = max(stat(statfileB)["Fluid"][field]["max"])

  # Velocity error calculation
  ab_ratio_l1 = a_error_l1 / b_error_l1
  ab_ratio_l2 = a_error_l2 / b_error_l2
  ab_ratio_inf = a_error_inf / b_error_inf

  #ab_error = [log(ab_ratio_l1, 2), log(ab_ratio_l2, 2), log(ab_ratio_inf, 2)]
  # Compute only the convergence using the max norm until ShallowWater.F90's output is fixed
  ab_error = [1000, 1000, log(ab_ratio_inf, 2)]
  return ab_error
Example #20
def report_convergence(file1, file2):
  print(file1, "->", file2)
  
  stat1 = stat(file1)
  stat2 = stat(file2)

  errortop_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][-1])
  errortop_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][-1])
  convergencetop_l2 = log((errortop_l2_1/errortop_l2_2), 2)

  print('  convergencetop_l2 = ', convergencetop_l2)
  print('    errortop_l2_1 = ', errortop_l2_1)
  print('    errortop_l2_2 = ', errortop_l2_2)
  
  errorbottom_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][-1])
  errorbottom_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][-1])
  convergencebottom_l2 = log((errorbottom_l2_1/errorbottom_l2_2), 2)

  print('  convergencebottom_l2 = ', convergencebottom_l2)
  print('    errorbottom_l2_1 = ', errorbottom_l2_1)
  print('    errorbottom_l2_2 = ', errorbottom_l2_2)
  
  error_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][-1])
  error_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][-1])
  convergence_l2 = log((error_l2_1/error_l2_2), 2)

  print('  convergence_l2 = ', convergence_l2)
  print('    error_l2_1 = ', error_l2_1)
  print('    error_l2_2 = ', error_l2_2)
  
  error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"][-1]
  error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"][-1]
  convergence_linf = log((error_linf_1/error_linf_2), 2)

  print('  convergence_linf = ', convergence_linf)
  print('    error_linf_1 = ', error_linf_1)
  print('    error_linf_2 = ', error_linf_2)

  return [convergencetop_l2, convergencebottom_l2, convergence_linf]
Example #21
    def get_average_dofs(self, STAT_F, cpu):
        """
            NOTE: we do not normalise by the number of energy groups
                  per group set to give the total number of DOFs. The
                  user has to multiply by the ratio of energy groups to
                  group sets to get the total DOFs.

            NOTE 2: This will not work with spatial adaptivity.
        """
        # Try to get the number of nodes when one group set is used
        try:
            self.Nnodes = stat(STAT_F)['NumberOfNodes']['Value'][-1]
        except KeyError:
            self.Nnodes = stat(STAT_F)['NumberOfContNodes_gset_1']['Value'][-1]

        g_set = 0
        # Try to get the DOFs for when one group set is used
        try:
            self.CDOFS[cpu] = stat(STAT_F)['ContinuousDOF_per_group']['Value']
            self.DDOFS[cpu] = stat(
                STAT_F)['DiscontinuousDOF_per_group']['Value']
        # We have multiple group sets, so read them one by one
        except KeyError:
            # Start reading the group sets
            g_set = 1
            while True:
                try:
                    self.CDOFS[f'{cpu}_gset_{g_set}'] = stat(STAT_F)[
                        f'ContinuousDOF_per_group_gset_{g_set}']['Value']
                    self.DDOFS[f'{cpu}_gset_{g_set}'] = stat(STAT_F)[
                        f'DiscontinuousDOF_per_group_gset_{g_set}']['Value']
                    g_set += 1
                # We ran out of group sets to read, so stop
                except KeyError:
                    break

        # Both the CDOFs and the DDOFs have the same keys, therefore simply
        # add the two dictionaries.
        # We know how many group sets we have, so add the DOFs of all of them
        if g_set > 1:
            # Use a numpy array so that += adds elementwise
            # (a plain list would be extended instead)
            temp = np.zeros(len(self.CDOFS[f'{cpu}_gset_1']))
            for g in range(1, g_set):
                temp += self.CDOFS[f'{cpu}_gset_{g}'] + \
                    self.DDOFS[f'{cpu}_gset_{g}']
            self.ALLDOFS[cpu] = temp

        else:
            self.ALLDOFS[cpu] = self.CDOFS[cpu] + self.DDOFS[cpu]

        # Normalise by the number of spatial nodes
        self.ALLDOFS[cpu] /= self.Nnodes

        return self.ALLDOFS[cpu]
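
The accumulator above has to be a numpy array (assuming numpy is imported as np in the surrounding module) because list += ndarray extends the list rather than adding elementwise; a quick illustration of the difference:

import numpy as np

acc_list = [0.0, 0.0]
acc_list += np.array([1.0, 2.0])  # extends the list: [0.0, 0.0, 1.0, 2.0]

acc_arr = np.zeros(2)
acc_arr += np.array([1.0, 2.0])   # adds elementwise: [1.0, 2.0]
print(acc_list, acc_arr)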
Example #22
import numpy
import pylab
from fluidity_tools import stat_parser as stat

pylab.rc('font', size=32)

pylab.subplot(211)

statfile24 = "particle_rayleigh-taylor-mu10-24.stat"
statfile48 = "particle_rayleigh-taylor-mu10-48.stat"

# Data from paper:
cnd_data = numpy.loadtxt("rms_data.txt")
particle_data = numpy.loadtxt("rms_data_4.txt")

# Scaling factor to account for the dimensions of the model
scaling_factor = numpy.sqrt(1. / 0.9142)

# First plot 24x24 case:
pylab.plot(stat(statfile24)["ElapsedTime"]["value"][:],
           stat(statfile24)["Buoyant"]["Velocity%magnitude"]["l2norm"][:] *
           scaling_factor,
           'r',
           linestyle='-',
           lw=4.0,
           label="Particle-24")

# Next plot 48x48 case:
pylab.plot(stat(statfile48)["ElapsedTime"]["value"][:],
           stat(statfile48)["Buoyant"]["Velocity%magnitude"]["l2norm"][:] *
           scaling_factor,
           'b',
           linestyle='-',
           lw=4.0,
           label="Particle-48")
Example #23
#!/usr/bin/python

# This script plots up the Nusselt number for both the 24x24 and 48x48 cases:
import pylab
from fluidity_tools import stat_parser as stat

# Stat files:
statfile24 = "stokes-sc-Ra1e5-24.stat"
statfile48 = "stokes-sc-Ra1e5-48.stat"

# First plot 24x24 case:
pylab.plot(stat(statfile24)["CoordinateMesh"]["nodes"][-1],
      -stat(statfile24)["Fluid"]["Temperature"]["surface_integral%Top"][-1],
      linestyle='None', marker='o', markerfacecolor='0.15')

# Next plot 48x48 case:
pylab.plot(stat(statfile48)["CoordinateMesh"]["nodes"][-1],
      -stat(statfile48)["Fluid"]["Temperature"]["surface_integral%Top"][-1],
       linestyle='None', marker='o', markerfacecolor='0.15')

# Plot benchmark value as line for comparison:
pylab.plot([100,8e4],[10.534,10.534],'k--',lw=0.6)

pylab.xlabel(r"Vertices")
pylab.ylabel(r"Nusselt Number")
pylab.xlim(100,1e4)
pylab.ylim(9.0,11.0)

pylab.savefig("Nu_1e5.png")
Example #24
#!/usr/bin/python

# This script plots up the Nusselt number for both the 24x24 and 48x48 cases:
import pylab
from fluidity_tools import stat_parser as stat

# Stat files:
statfile24 = "stokes-sc-Ra1e5-24.stat"
statfile48 = "stokes-sc-Ra1e5-48.stat"

# First plot 24x24 case:
pylab.plot(
    stat(statfile24)["CoordinateMesh"]["nodes"][-1],
    -stat(statfile24)["Fluid"]["Temperature"]["surface_integral%Top"][-1],
    linestyle='None',
    marker='o',
    markerfacecolor='0.15')

# Next plot 48x48 case:
pylab.plot(
    stat(statfile48)["CoordinateMesh"]["nodes"][-1],
    -stat(statfile48)["Fluid"]["Temperature"]["surface_integral%Top"][-1],
    linestyle='None',
    marker='o',
    markerfacecolor='0.15')

# Plot benchmark value as line for comparison:
pylab.plot([100, 8e4], [10.534, 10.534], 'k--', lw=0.6)

pylab.xlabel(r"Vertices")
pylab.ylabel(r"Nusselt Number")
Example #25
#!/usr/bin/python

# This script plots up the RMS velocity for both the 24x24 and 48x48 cases
import pylab
from fluidity_tools import stat_parser as stat

# Stat files:
statfile24 = "stokes-sc-Ra1e5-24.stat"
statfile48 = "stokes-sc-Ra1e5-48.stat"

# First plot 24x24 case:
pylab.plot(stat(statfile24)["CoordinateMesh"]["nodes"][-1],
           stat(statfile24)["Fluid"]["Velocity%magnitude"]["l2norm"][-1],
           linestyle='None',
           marker='o',
           markerfacecolor='0.15')

# Next plot 48x48 case:
pylab.plot(stat(statfile48)["CoordinateMesh"]["nodes"][-1],
           stat(statfile48)["Fluid"]["Velocity%magnitude"]["l2norm"][-1],
           linestyle='None',
           marker='o',
           markerfacecolor='0.15')

# Plot benchmark value as line for comparison:
pylab.plot([100, 8e4], [193.214, 193.214], 'k--', lw=0.6)

pylab.xlabel(r"Vertices")
pylab.ylabel(r"RMS Velocity")
pylab.xlim(100, 1e4)
pylab.ylim(192.0, 195.0)
Example #26
    # The adapt order requested is larger than the number of adapt steps present
    elif max_adapt > len(first_eigen_indices):
        max_adapt = len(first_eigen_indices) - 1

    # Slice all dictionary lists up to the max_adapt order
    for key in input_dict:
        input_dict[key] = input_dict[key][:first_eigen_indices[max_adapt]]

    return input_dict


# This will read entire columns from the file, which means that if we have
# data with varying orders of angular adapts, we will compare incorrect data
for i in stats_to_read:
    power_it_3gpr[i] = stat(f'{root_dir}/{stat_3f}')[i]['Value']
    power_it_3gpr_lb[i] = stat(f'{root_dir_lb}/{stat_3f}')[i]['Value']
    power_it_2gpr[i] = stat(f'{root_dir}/{stat_2f}')[i]['Value']
    power_it_2gpr_lb[i] = stat(f'{root_dir_lb}/{stat_2f}')[i]['Value']
    power_it_1gpr[i] = stat(f'{root_dir}/{stat_1f}')[i]['Value']
    power_it_1gpr_lb[i] = stat(f'{root_dir_lb}/{stat_1f}')[i]['Value']

# Go and count how many first eigen iterations we have in our data
# NOTE: Remember to do this for all the dictionaries
power_it_3gpr = eigen_stat_file_slice(power_it_3gpr, MAX_ADAPT)
power_it_3gpr_lb = eigen_stat_file_slice(power_it_3gpr_lb, MAX_ADAPT)

power_it_2gpr = eigen_stat_file_slice(power_it_2gpr, MAX_ADAPT)
power_it_2gpr_lb = eigen_stat_file_slice(power_it_2gpr_lb, MAX_ADAPT)

power_it_1gpr = eigen_stat_file_slice(power_it_1gpr, MAX_ADAPT)
Example #27
from fluidity_tools import stat_parser as stat
from vtktools import *
from math import log
import numpy as np

meshes = [['A', 'B'], ['B', 'C']]  #, ['C','D']]

convergence = np.ones(2) * 1e10

print('')
print('ORDER OF CONVERGENCE')
print('-------------------------------------------')

print('TracerError:')
print('-------------------------------------------')

for i, mesh in enumerate(meshes):

    a_error = stat("MMS_" + str(mesh[0]) +
                   ".stat")["NS"]["TracerError"]["l2norm"][-1]
    b_error = stat("MMS_" + str(mesh[1]) +
                   ".stat")["NS"]["TracerError"]["l2norm"][-1]

    ratio = a_error / b_error

    print(mesh[0] + '->' + mesh[1] + ': ', log(ratio, 2))

    convergence[i] = log(ratio, 2)

print('-------------------------------------------')
Example #28
    remaining_tests = int(sub_tests[k][-1])

# Prepare the commands to run batches of simulations
for testy in sub_tests[:]:
    p = 0
    k = 0
    while len(testy) > k:
        dirc = 'hpc/N' + str(testy[k])
        tests_list2[k] = dirc
        # Retrieve the walltimes and timesteps for each simulation
        T = [0.0] * max_t
        TimeS = [0.0] * max_t
        for t in range(1, max_t):
            if END:
                p = stat(dirc + '/' + output_name +
                         '.stat')["ElapsedWallTime"]["value"][t]
                p2 = stat(dirc + '/' + output_name +
                          '.stat')["ElapsedWallTime"]["value"][(t - 1)]
                # ... also the total elapsed time
                TimeS[t - 1] = stat(dirc + '/' + output_name +
                                    '.stat')["ElapsedTime"]["value"][(t - 1)]
                # ... and the number of elements
                aux = stat(dirc + '/' + output_name +
                           '.stat')["CoordinateMesh"]["elements"][(t - 1)]
            else:
                p = stat(dirc + '/' + output_name +
                         '.stat')["ElapsedWallTime"]["value"][-1 - t]
                p2 = stat(dirc + '/' + output_name +
                          '.stat')["ElapsedWallTime"]["value"][-1 - (t - 1)]
                # ... also the total elapsed time
                TimeS[t - 1] = stat(dirc + '/' + output_name +
Example #29
from fluidity_tools import stat_parser as stat
from math import log
import numpy as np

meshes = [['A', 'B'], ['B', 'C']]  # , ['C','D']]

convergence = np.ones(2) * 1e10

print('')
print('ORDER OF CONVERGENCE')
print('-------------------------------------------')

print('VelocityError:')
print('-------------------------------------------')

for i, mesh in enumerate(meshes):

    a_error_x = stat("MMS_"+str(mesh[0])+".stat")["NS"]["VelocityError%1"]["l2norm"][-1]
    b_error_x = stat("MMS_"+str(mesh[1])+".stat")["NS"]["VelocityError%1"]["l2norm"][-1]
    a_error_y = stat("MMS_"+str(mesh[0])+".stat")["NS"]["VelocityError%2"]["l2norm"][-1]
    b_error_y = stat("MMS_"+str(mesh[1])+".stat")["NS"]["VelocityError%2"]["l2norm"][-1]

    ratio_x = a_error_x / b_error_x
    ratio_y = a_error_y / b_error_y

    print(mesh[0] + '->' + mesh[1] + ': ', [log(ratio_x, 2), log(ratio_y, 2)])

    convergence[0] = min(log(ratio_x, 2), log(ratio_y, 2), convergence[0])

print('-------------------------------------------')

print('EddyViscosityError:')
print('-------------------------------------------')
Example #30
#!/usr/bin/python

# This script plots up the RMS velocity for both the 24x24 and 48x48 cases
import pylab
from fluidity_tools import stat_parser as stat

# Stat files:
statfile24 = "stokes-sc-Ra1e5-24.stat"
statfile48 = "stokes-sc-Ra1e5-48.stat"

# First plot 24x24 case:
pylab.plot(stat(statfile24)["CoordinateMesh"]["nodes"][-1],
      stat(statfile24)["Fluid"]["Velocity%magnitude"]["l2norm"][-1],
      linestyle='None', marker='o', markerfacecolor='0.15')

# Next plot 48x48 case:
pylab.plot(stat(statfile48)["CoordinateMesh"]["nodes"][-1],
      stat(statfile48)["Fluid"]["Velocity%magnitude"]["l2norm"][-1],
      linestyle='None', marker='o', markerfacecolor='0.15')

# Plot benchmark value as line for comparison:
pylab.plot([100,8e4],[193.214,193.214],'k--',lw=0.6)

pylab.xlabel(r"Vertices")
pylab.ylabel(r"RMS Velocity")
pylab.xlim(100,1e4)
pylab.ylim(192.0,195.0)

pylab.savefig("RMS_1e5.png")

Example #31
from fluidity_tools import stat_parser as stat
from math import log
import numpy as np

meshes = [['A', 'B'], ['B', 'C'], ['C', 'D']]

convergence = np.ones(15) * 1e10

print('')
print('ORDER OF CONVERGENCE')
print('-------------------------------------------')

print('VelocityError:')
print('-------------------------------------------')

for i, mesh in enumerate(meshes):

    a_error_x = stat("MMS_"+str(mesh[0])+".stat")["NS"]["VelocityError%1"]["l2norm"][-1]
    b_error_x = stat("MMS_"+str(mesh[1])+".stat")["NS"]["VelocityError%1"]["l2norm"][-1]
    a_error_y = stat("MMS_"+str(mesh[0])+".stat")["NS"]["VelocityError%2"]["l2norm"][-1]
    b_error_y = stat("MMS_"+str(mesh[1])+".stat")["NS"]["VelocityError%2"]["l2norm"][-1]

    ratio_x = a_error_x / b_error_x
    ratio_y = a_error_y / b_error_y

    print(mesh[0] + '->' + mesh[1] + ': ', [log(ratio_x, 2), log(ratio_y, 2)])

    convergence[0] = min(log(ratio_x, 2), log(ratio_y, 2), convergence[0])

print('-------------------------------------------')

fields = ['TurbulentKineticEnergyProductionError', 
          'TurbulentKineticEnergyDestructionError',
Example #32
def report_convergence(file1, file2):
    print(file1, "->", file2)

    stat1 = stat(file1)
    stat2 = stat(file2)

    print(stat1["dt"]["value"][0], "->", stat2["dt"]["value"][0])

    errortop_l2_1 = sqrt(
        sum(stat1["Fluid"]["DifferenceSquared"]
            ["surface_integral%TopSurfaceL2Norm"][1:] *
            stat1["dt"]["value"][1:]))
    errortop_l2_2 = sqrt(
        sum(stat2["Fluid"]["DifferenceSquared"]
            ["surface_integral%TopSurfaceL2Norm"][1:] *
            stat2["dt"]["value"][1:]))
    convergencetop_l2 = log((errortop_l2_1 / errortop_l2_2), 2)

    print('  convergencetop_l2 = ', convergencetop_l2)
    print('    errortop_l2_1 = ', errortop_l2_1)
    print('    errortop_l2_2 = ', errortop_l2_2)

    errorbottom_l2_1 = sqrt(
        sum(stat1["Fluid"]["DifferenceSquared"]
            ["surface_integral%BottomSurfaceL2Norm"][1:] *
            stat1["dt"]["value"][1:]))
    errorbottom_l2_2 = sqrt(
        sum(stat2["Fluid"]["DifferenceSquared"]
            ["surface_integral%BottomSurfaceL2Norm"][1:] *
            stat2["dt"]["value"][1:]))
    convergencebottom_l2 = log((errorbottom_l2_1 / errorbottom_l2_2), 2)

    print('  convergencebottom_l2 = ', convergencebottom_l2)
    print('    errorbottom_l2_1 = ', errorbottom_l2_1)
    print('    errorbottom_l2_2 = ', errorbottom_l2_2)

    error_l2_1 = sqrt(
        sum(stat1["Fluid"]["DifferenceSquared"]
            ["surface_integral%SurfaceL2Norm"][1:] * stat1["dt"]["value"][1:]))
    error_l2_2 = sqrt(
        sum(stat2["Fluid"]["DifferenceSquared"]
            ["surface_integral%SurfaceL2Norm"][1:] * stat2["dt"]["value"][1:]))
    convergence_l2 = log((error_l2_1 / error_l2_2), 2)

    print('  convergence_l2 = ', convergence_l2)
    print('    error_l2_1 = ', error_l2_1)
    print('    error_l2_2 = ', error_l2_2)

    error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"].max()
    error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"].max()
    convergence_linf = log((error_linf_1 / error_linf_2), 2)

    print('  convergence_linf = ', convergence_linf)
    print('    error_linf_1 = ', error_linf_1)
    print('    error_linf_2 = ', error_linf_2)

    quad1 = quad(lambda t: solution.nond_error_amp(stat1, t)**2,
                 stat1["ElapsedTime"]["value"][0],
                 stat1["ElapsedTime"]["value"][-1],
                 limit=1000)
    quad2 = quad(lambda t: solution.nond_error_amp(stat2, t)**2,
                 stat2["ElapsedTime"]["value"][0],
                 stat2["ElapsedTime"]["value"][-1],
                 limit=1000)
    errormaxfs_l2_1 = sqrt(quad1[0])
    errormaxfs_l2_2 = sqrt(quad2[0])
    convergencemaxfs_l2 = log((errormaxfs_l2_1 / errormaxfs_l2_2), 2)

    print('  convergencemaxfs_l2 = ', convergencemaxfs_l2)
    print('    errormaxfs_l2_1 = ', errormaxfs_l2_1, '(', quad1[1], ')')
    print('    errormaxfs_l2_2 = ', errormaxfs_l2_2, '(', quad2[1], ')')

    return [convergencetop_l2, convergencebottom_l2, convergence_linf]
Example #33
def stats(fname):
    error = stat(fname + ".stat")["Fluid"]["Error%magnitude"]["l2norm"][:]
    t = stat(fname + ".stat")["ElapsedTime"]["value"][:]
    return t, error
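
A hypothetical use of stats(), plotting the error history on a log scale (the file prefix is a placeholder):

import pylab

t, error = stats("mms_run")  # reads mms_run.stat
pylab.semilogy(t, error)
pylab.xlabel("Time [s]")
pylab.ylabel("Error l2-norm")
pylab.savefig("error_history.png")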
Example #34
def report_convergence(file1, file2):
    print(file1, "->", file2)

    stat1 = stat(file1)
    stat2 = stat(file2)

    print(stat1["dt"]["value"][0], "->", stat2["dt"]["value"][0])

    errortop_l2_1 = sqrt(
        sum(stat1["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Top"][1:]**
            2 * stat1["dt"]["value"][1:]))
    errortop_l2_2 = sqrt(
        sum(stat2["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Top"][1:]**
            2 * stat2["dt"]["value"][1:]))
    convergencetop_l2 = log((errortop_l2_1 / errortop_l2_2), 2)

    print('  convergencetop_l2 = ', convergencetop_l2)
    print('    errortop_l2_1 = ', errortop_l2_1)
    print('    errortop_l2_2 = ', errortop_l2_2)

    errorbottom_l2_1 = sqrt(
        sum(stat1["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Bottom"]
            [1:]**2 * stat1["dt"]["value"][1:]))
    errorbottom_l2_2 = sqrt(
        sum(stat2["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Bottom"]
            [1:]**2 * stat2["dt"]["value"][1:]))
    convergencebottom_l2 = log((errorbottom_l2_1 / errorbottom_l2_2), 2)

    print('  convergencebottom_l2 = ', convergencebottom_l2)
    print('    errorbottom_l2_1 = ', errorbottom_l2_1)
    print('    errorbottom_l2_2 = ', errorbottom_l2_2)

    error_l2_1 = sqrt(
        sum(stat1["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Both"][1:]
            **2 * stat1["dt"]["value"][1:]))
    error_l2_2 = sqrt(
        sum(stat2["Fluid"]["FreeSurfaceDifference"]["surface_l2norm%Both"][1:]
            **2 * stat2["dt"]["value"][1:]))
    convergence_l2 = log((error_l2_1 / error_l2_2), 2)

    print('  convergence_l2 = ', convergence_l2)
    print('    error_l2_1 = ', error_l2_1)
    print('    error_l2_2 = ', error_l2_2)

    error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"].max()
    error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"].max()
    convergence_linf = log((error_linf_1 / error_linf_2), 2)

    print('  convergence_linf = ', convergence_linf)
    print('    error_linf_1 = ', error_linf_1)
    print('    error_linf_2 = ', error_linf_2)

    quad1 = quad(lambda t: solution.nond_error_amp(stat1, t)**2,
                 stat1["ElapsedTime"]["value"][0],
                 stat1["ElapsedTime"]["value"][-1],
                 limit=1000)
    quad2 = quad(lambda t: solution.nond_error_amp(stat2, t)**2,
                 stat2["ElapsedTime"]["value"][0],
                 stat2["ElapsedTime"]["value"][-1],
                 limit=1000)
    errormaxfs_l2_1 = sqrt(quad1[0])
    errormaxfs_l2_2 = sqrt(quad2[0])
    convergencemaxfs_l2 = log((errormaxfs_l2_1 / errormaxfs_l2_2), 2)

    print('  convergencemaxfs_l2 = ', convergencemaxfs_l2)
    print('    errormaxfs_l2_1 = ', errormaxfs_l2_1, '(', quad1[1], ')')
    print('    errormaxfs_l2_2 = ', errormaxfs_l2_2, '(', quad2[1], ')')

    return [convergencetop_l2, convergencebottom_l2, convergence_linf]
Example #35
def stats(fname):
    error = stat(fname + ".stat")["Fluid"]["Error%magnitude"]["l2norm"][:]
    t = stat(fname + ".stat")["ElapsedTime"]["value"][:]
    return t, error
Example #36
#!/usr/bin/python

from fluidity_tools import stat_parser as stat
from vtktools import *
from math import log
import numpy as np

meshes = [['A', 'B'], ['B', 'C']]  # , ['C','D']]

convergence = np.ones(2) * 1e10

print('')
print('ORDER OF CONVERGENCE')
print('-------------------------------------------')

print('TracerError:')
print('-------------------------------------------')

for i, mesh in enumerate(meshes):

    a_error = stat("MMS_"+str(mesh[0])+".stat")["NS"]["TracerError"]["l2norm"][-1]
    b_error = stat("MMS_"+str(mesh[1])+".stat")["NS"]["TracerError"]["l2norm"][-1]

    ratio = a_error / b_error

    print(mesh[0] + '->' + mesh[1] + ': ', log(ratio, 2))

    convergence[i] = log(ratio, 2)

print('-------------------------------------------')
Example #37
    return average, min_data, max_data, nodes, halos


def strong_scaling(t0, c0, tn, cn):
    """
        Takes the time and number of cores for two simulations and returns
        the strong-scaling efficiency as a percentage.
    """
    return t0 / (cn / c0 * tn) * 100
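
For example, a run taking 100 s on 1 core and 30 s on 4 cores has a strong-scaling efficiency of 100 / (4 / 1 * 30) * 100, roughly 83 %:

print(strong_scaling(100.0, 1, 30.0, 4))  # -> 83.33...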


#%%

for cpu in CORES:
    os.chdir(f'{DIR}/core{cpu}')
    lb_time[cpu] = stat(STAT_F)['RadiantLoadBalanceTime']['Value']
    wall_time[cpu] = stat(STAT_F)['ElapsedWallTime']['Value']
    cdofs[cpu] = stat(STAT_F)['ContinuousDOF_per_group']['Value']
    AV[cpu], MIN[cpu], MAX[cpu], NODES[cpu], HALOS[cpu] = av_min_max_halos(
        'rad_dogleg_noio', cpu, 5)

os.chdir('/home/gn/Dropbox/PhD/ANSWERS_seminar/2019/figures')

#%% [markdown]
# # LB Time vs CDOF for all cores

#%%
fname = 'LB_time_vs_CDOF.png'
fig, ax = plt.subplots(figsize=(10, 5))
fig.canvas.set_window_title(fname)
for cpu in CORES:
# The following topics are examined in the report:
# - Time spent on load balancer
# - Time spent on load balancer as a function of CPUs, Elements and halo nodes
# - Time spent doing a solve as a function of CPUs, Elements and halo nodes
# - Strong scaling studies for the asymmetrical brunner lattice problem
# - Weak scaling studies for the asymmetrical brunner lattice problem
#
#%% [markdown]
# ## Are we better off load balancing? Overall performance with load balancer on vs load balancer off
# The runtime of FETCH appears to be doubled when the load balancer is disabled for this simple but highly
# asymmetrical problem.
#

#%%
os.chdir('/home/gn/Code/ssh/sdargav/tolerance_study')
wall_time_off = stat('rad_radiant_tol1000000.Neutron.output_quantities.stat'
                     )['ElapsedWallTime']['Value']
wall_time_on = stat('rad_radiant_tol1.05.Neutron.output_quantities.stat'
                    )['ElapsedWallTime']['Value']
angle_adapt = stat('rad_radiant_tol1.05.Neutron.output_quantities.stat'
                   )['AngleAdaptCount']['Value']

compare_plot(range(1, len(angle_adapt) + 1), wall_time_off,
             range(1, len(angle_adapt) + 1), wall_time_on,
             'LB off', 'LB on', 'Angle adapt', 'Wall Time [s]')
plt.plot()

#%% [markdown]
# ## Are we better off load balancing? Time spent on solver when LB is off vs on
# It can be seen that the doubling of walltime when the load balancer is disabled clearly transfers to the solver