PHiLiP: Parallel High-Order Library for PDEs through hp-adaptive Discontinuous Galerkin methods
TGV_scaling.cpp
#include <fstream>
#include <iomanip>   // std::setprecision
#include <cmath>     // std::pow, std::log
#include "dg/dg_factory.hpp"
#include "TGV_scaling.h"
#include "physics/initial_conditions/set_initial_condition.h"
#include "physics/initial_conditions/initial_condition_function.h"
#include "mesh/grids/nonsymmetric_curved_periodic_grid.hpp"
#include "mesh/grids/straight_periodic_cube.hpp"
#include <time.h>
#include <deal.II/base/timer.h>

namespace PHiLiP {
namespace Tests {

template <int dim, int nstate>
EulerTaylorGreenScaling<dim, nstate>::EulerTaylorGreenScaling(const Parameters::AllParameters *const parameters_input)
    : TestsBase::TestsBase(parameters_input)
{}

template <int dim, int nstate>
int EulerTaylorGreenScaling<dim, nstate>::run_test() const
{
    using Triangulation = dealii::parallel::distributed::Triangulation<dim>;
    std::shared_ptr<Triangulation> grid = std::make_shared<Triangulation>(
        MPI_COMM_WORLD,
        typename dealii::Triangulation<dim>::MeshSmoothing(
            dealii::Triangulation<dim>::smoothing_on_refinement |
            dealii::Triangulation<dim>::smoothing_on_coarsening));

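    // The triangulation above is a deal.II parallel distributed mesh: its cells are
    // partitioned across the MPI ranks of MPI_COMM_WORLD.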
    using real = double;

    // Local copy of the parameters so that per-degree settings (e.g. overintegration) can be modified.
    Parameters::AllParameters all_parameters_new = *all_parameters;
    double left = 0.0;
    double right = 2 * dealii::numbers::PI;

    const unsigned int n_refinements = all_parameters->flow_solver_param.number_of_mesh_refinements;
    if(all_parameters->use_curvilinear_grid){
        // if curvilinear
        PHiLiP::Grids::nonsymmetric_curved_grid<dim,Triangulation>(*grid, n_refinements);
    }
    else{
        // if straight
        PHiLiP::Grids::straight_periodic_cube<dim,Triangulation>(grid, left, right, pow(2.0,n_refinements));
    }

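    // Both branches produce a periodic cube mesh; the straight case spans [left, right]^dim = [0, 2*pi]^dim,
    // while the curvilinear case applies a nonsymmetric warping to the element mapping.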
    std::ofstream myfile (all_parameters_new.energy_file + ".gpl", std::ios::trunc);
    const unsigned int poly_degree_start = all_parameters->flow_solver_param.poly_degree;

    const unsigned int poly_degree_end = (all_parameters->use_curvilinear_grid) ? 11 : 16;
    pcout << "Max poly degree: " << poly_degree_end << std::endl;
    std::vector<double> time_to_run(poly_degree_end);
    std::vector<double> time_to_run_mpi(poly_degree_end);

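    // time_to_run[p] will hold this rank's accumulated residual-assembly time at degree p;
    // time_to_run_mpi[p] will hold the same quantity summed over all MPI ranks.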
    // poly degree loop
    for(unsigned int poly_degree = poly_degree_start; poly_degree < poly_degree_end; poly_degree++){

        // Set the warped grid degree: curvilinear grids use grid degree poly_degree, straight grids use degree 1.
        const unsigned int grid_degree = (all_parameters->use_curvilinear_grid) ? poly_degree : 1;

        if(all_parameters->overintegration == 100){
            if(all_parameters->use_curvilinear_grid){
                all_parameters_new.overintegration = 2*(poly_degree+1);
            }
            else{
                all_parameters_new.overintegration = poly_degree+1;
            }
        }

        // Create DG
        std::shared_ptr < PHiLiP::DGBase<dim, double> > dg = PHiLiP::DGFactory<dim,double>::create_discontinuous_galerkin(&all_parameters_new, poly_degree, poly_degree, grid_degree, grid);
        dg->allocate_system (false,false,false);

        // Apply initial condition for TGV
        std::shared_ptr< InitialConditionFunction<dim,nstate,double> > initial_condition_function =
                InitialConditionFactory<dim,nstate,double>::create_InitialConditionFunction(&all_parameters_new);
        SetInitialCondition<dim,nstate,double>::set_initial_condition(initial_condition_function, dg, &all_parameters_new);
        // Create ODE system.
        std::shared_ptr<ODE::ODESolverBase<dim, double>> ode_solver = ODE::ODESolverFactory<dim, double>::create_ODESolver(dg);

        ode_solver->current_iteration = 0;
        ode_solver->allocate_ode_system();
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "ODE solver successfully created. This verifies no memory jump from ODE Solver." << std::endl;
        dealii::LinearAlgebra::distributed::Vector<double> solution_update;
        solution_update.reinit(dg->locally_owned_dofs, dg->ghost_dofs, this->mpi_communicator);

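        // In the loop below, solution_update receives the inverse mass matrix applied to the
        // right-hand side; only dg->assemble_residual_time enters the scaling measurement.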
        // Perform 10 steps solving the residual and applying the inverse of the mass matrix.
        for(unsigned int i_step=0; i_step<10; i_step++){
            dg->assemble_residual();
            if(all_parameters_new.use_inverse_mass_on_the_fly){
                dg->apply_inverse_global_mass_matrix(dg->right_hand_side, solution_update);
            } else{
                dg->global_inverse_mass_matrix.vmult(solution_update, dg->right_hand_side);
            }
        }

        // store local cpu time
        time_to_run[poly_degree] = dg->assemble_residual_time;
        // store mpi summed cpu time
        time_to_run_mpi[poly_degree] = dealii::Utilities::MPI::sum(time_to_run[poly_degree], this->mpi_communicator);

        std::cout << "Poly Degree " << poly_degree << " time to run local cpu " << std::fixed << std::setprecision(16) << time_to_run[poly_degree] << std::endl;
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "Poly Degree " << poly_degree << " time to run Mpi " << std::fixed << std::setprecision(16) << time_to_run_mpi[poly_degree] << std::endl;
        myfile << poly_degree << " " << std::fixed << std::setprecision(16) << time_to_run_mpi[poly_degree] << std::endl;
    } // end of poly loop
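    // The .gpl file written above contains two columns per polynomial degree:
    // the degree and the MPI-summed residual-assembly time.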

    myfile.close();
    double avg_slope = 0.0;
    // Print a chart of the times and slopes.
    pcout << "Times for one timestep" << std::endl;
    pcout << "local time | Slope | " << "MPI sum time | Slope " << std::endl;
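    // For each degree i, the slope below is log(t_i / t_{i-1}) / log((i+1)/i), i.e. the observed
    // exponent n in the model t ~ (p+1)^n; the expected exponent for residual assembly is dim+1.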
    for(unsigned int i=poly_degree_start+1; i<poly_degree_end; i++){
        const double slope_local = std::log(time_to_run[i] / time_to_run[i-1])
                                   / std::log((i+1.0)/i);
        const double slope_mpi = std::log(time_to_run_mpi[i] / time_to_run_mpi[i-1])
                                 / std::log((i+1.0)/i);
        pcout << time_to_run[i] << " " << slope_local << " "
              << time_to_run_mpi[i] << " " << slope_mpi << std::endl;
        // Accumulate the MPI slope over the last three polynomial degrees.
        if(i > poly_degree_end-4){
            avg_slope += slope_mpi;
        }
    }
    // Average of the last three MPI slopes.
    avg_slope /= 3.0;

    // The error tolerance on the slope is set to 0.8 because the CPU time is very sensitive to the
    // other processes being run.
    if(avg_slope - (dim + 1) > 0.8){
        pcout << "Did not scale at dim + 1. Instead scaled at " << avg_slope << std::endl;
        return 1;
    }

    // Check that it can run up to p=30 for Cartesian grids or p=20 for curvilinear grids without running out of memory.
    const unsigned int poly_degree = (all_parameters->use_curvilinear_grid) ? 20 : 30;
    const unsigned int grid_degree = (all_parameters->use_curvilinear_grid) ? poly_degree : 1;

    if(all_parameters->overintegration == 100){
        if(all_parameters->use_curvilinear_grid){
            all_parameters_new.overintegration = 2*(poly_degree+1);
        }
        else{
            all_parameters_new.overintegration = poly_degree+1;
        }
    }

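    // The point of this check is that no large global operators should be allocated at very high
    // polynomial degree; a std::bad_alloc below indicates unnecessary memory being allocated.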
    pcout << "Checking that it does not run out of memory for poly degree " << poly_degree << std::endl;
    // For curvilinear cases, check the allocation in the high-order grid.
    // Create DG
    std::shared_ptr < PHiLiP::DGBase<dim, double> > dg = PHiLiP::DGFactory<dim,double>::create_discontinuous_galerkin(&all_parameters_new, poly_degree, poly_degree, grid_degree, grid);
    try{
        dg->allocate_system (false,false,false);

        std::shared_ptr< InitialConditionFunction<dim,nstate,double> > initial_condition_function =
                InitialConditionFactory<dim,nstate,double>::create_InitialConditionFunction(&all_parameters_new);
        SetInitialCondition<dim,nstate,double>::set_initial_condition(initial_condition_function, dg, &all_parameters_new);

        std::shared_ptr<ODE::ODESolverBase<dim, double>> ode_solver = ODE::ODESolverFactory<dim, double>::create_ODESolver(dg);

        ode_solver->current_iteration = 0;
        ode_solver->allocate_ode_system();
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "ODE solver successfully created. This verifies no memory jump from ODE Solver." << std::endl;
        dealii::LinearAlgebra::distributed::Vector<double> solution_update;
        solution_update.reinit(dg->locally_owned_dofs, dg->ghost_dofs, this->mpi_communicator);

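        // Same residual / inverse-mass-matrix loop as in the timing study above; here only
        // successful completion without a std::bad_alloc matters.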
        for(unsigned int i_step=0; i_step<10; i_step++){
            dg->assemble_residual();
            if(all_parameters_new.use_inverse_mass_on_the_fly){
                dg->apply_inverse_global_mass_matrix(dg->right_hand_side, solution_update);
            } else{
                dg->global_inverse_mass_matrix.vmult(solution_update, dg->right_hand_side);
            }
        }
    }
    catch(std::bad_alloc &e){
        std::cout << "ending with bad_alloc (ran out of memory)" << std::endl;
        std::cout << "If the test fails here, then there is unnecessary memory being allocated." << std::endl;
        return 1;
    }
    // If it reaches here, then there is no memory issue.
    return 0;
}

#if PHILIP_DIM==3
template class EulerTaylorGreenScaling <PHILIP_DIM,PHILIP_DIM+2>;
#endif

} // Tests namespace
} // PHiLiP namespace
Cross-referenced symbols:

- EulerTaylorGreenScaling(const Parameters::AllParameters *const parameters_input): Constructor. (TGV_scaling.cpp:15)
- int run_test() const override: Ensure that the kinetic energy is bounded. (TGV_scaling.cpp:20)
- Euler Taylor-Green Vortex Scaling Test. (TGV_scaling.h:38)
- Base class of all the tests. (tests.h:17)
- const Parameters::AllParameters *const all_parameters: Pointer to all parameters. (tests.h:20)
- const MPI_Comm mpi_communicator: MPI communicator. (tests.h:39)
- dealii::ConditionalOStream pcout: ConditionalOStream. (tests.h:45)
- Main parameter class that contains the various other sub-parameter classes.
- FlowSolverParam flow_solver_param: Contains the parameters for simulation cases (flow solver test).
- unsigned int poly_degree: Polynomial order (P) of the basis functions for DG.
- int number_of_mesh_refinements: Number of refinements for naca0012 and Gaussian-bump based cases.
- int overintegration: Number of additional quadrature points to use.
- bool use_curvilinear_grid: Flag to use curvilinear grid.
- bool use_inverse_mass_on_the_fly: Flag to use inverse mass matrix on-the-fly for explicit solves.
- std::string energy_file: Energy file.
- create_discontinuous_galerkin(const Parameters::AllParameters *const parameters_input, const unsigned int degree, const unsigned int max_degree_input, const unsigned int grid_degree_input, const std::shared_ptr<Triangulation> triangulation_input): Creates a derived DG object, but returns it as DGBase. (dg_factory.cpp:10)
- create_ODESolver(std::shared_ptr<DGBase<dim, real, MeshType>> dg_input): Creates either an implicit or explicit ODE solver based on the parameter value (no POD basis given).
- set_initial_condition(initial_condition_function_input, dg_input, parameters_input): Applies the given initial condition function to the given dg object.
- create_InitialConditionFunction(Parameters::AllParameters const *const param): Constructs an InitialConditionFunction object from the global parameter file.
- Files for the baseline physics. (ADTypes.hpp:10)