2 #include "dg/dg_factory.hpp" 3 #include "TGV_scaling.h" 4 #include "physics/initial_conditions/set_initial_condition.h" 5 #include "physics/initial_conditions/initial_condition_function.h" 6 #include "mesh/grids/nonsymmetric_curved_periodic_grid.hpp" 7 #include "mesh/grids/straight_periodic_cube.hpp" 9 #include <deal.II/base/timer.h> 14 template <
int dim,
int nstate>
19 template <
int dim,
int nstate>
22 using Triangulation = dealii::parallel::distributed::Triangulation<dim>;
23 std::shared_ptr<Triangulation> grid = std::make_shared<Triangulation>(
25 typename dealii::Triangulation<dim>::MeshSmoothing(
26 dealii::Triangulation<dim>::smoothing_on_refinement |
27 dealii::Triangulation<dim>::smoothing_on_coarsening));
    // ... (definitions of all_parameters_new, poly_degree_start, poly_degree_end,
    //      n_refinements, and the left domain bound are omitted in this excerpt)
    double right = 2 * dealii::numbers::PI;
    if (all_parameters_new.use_curvilinear_grid) {
        PHiLiP::Grids::nonsymmetric_curved_grid<dim, Triangulation>(*grid, n_refinements);
    } else {
        PHiLiP::Grids::straight_periodic_cube<dim, Triangulation>(grid, left, right, pow(2.0, n_refinements));
    }
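    // Explanatory note (the [0, 2*pi] bounds are an assumption based on the standard
    // Taylor-Green vortex setup): straight_periodic_cube appears to build a periodic
    // hypercube [left, right]^dim with pow(2, n_refinements) cells per coordinate
    // direction, e.g. n_refinements = 2 gives 4 cells per direction and 4^dim cells in
    // total, while nonsymmetric_curved_grid presumably applies a nonsymmetric curved
    // warping to a periodic grid with n_refinements refinement levels.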
    std::ofstream myfile(all_parameters_new.energy_file + ".gpl", std::ios::trunc);

    pcout << "Max poly degree: " << poly_degree_end << std::endl;
    std::vector<double> time_to_run;
    time_to_run.resize(poly_degree_end);       // resize (not just reserve) so operator[] below indexes valid elements
    std::vector<double> time_to_run_mpi;
    time_to_run_mpi.resize(poly_degree_end);
    for (unsigned int poly_degree = poly_degree_start; poly_degree < poly_degree_end; poly_degree++) {
        // ... (the DG object "dg" is created here for the current poly_degree, omitted in this excerpt)
        dg->allocate_system(false, false, false);

        std::shared_ptr<InitialConditionFunction<dim, nstate, double>> initial_condition_function =
            InitialConditionFactory<dim, nstate, double>::create_InitialConditionFunction(&all_parameters_new);
        SetInitialCondition<dim, nstate, double>::set_initial_condition(initial_condition_function, dg, &all_parameters_new);

        // ... (the ODE solver is created here via create_ODESolver(dg), omitted in this excerpt)
        ode_solver->current_iteration = 0;
        ode_solver->allocate_ode_system();
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "ODE solver successfully created. This verifies no memory jump from ODE Solver." << std::endl;

        dealii::LinearAlgebra::distributed::Vector<double> solution_update;
        solution_update.reinit(dg->locally_owned_dofs, dg->ghost_dofs, this->mpi_communicator);
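        // Explanatory note: reinit() with (locally_owned_dofs, ghost_dofs, communicator)
        // sets up solution_update as an MPI-partitioned vector with ghost entries, so it
        // can receive the inverse-mass-matrix product computed from dg->right_hand_side
        // in the timing loop below.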
        for (unsigned int i_step = 0; i_step < 10; i_step++) {
            dg->assemble_residual();
            if (all_parameters_new.use_inverse_mass_on_the_fly) {
                dg->apply_inverse_global_mass_matrix(dg->right_hand_side, solution_update);
            } else {
                dg->global_inverse_mass_matrix.vmult(solution_update, dg->right_hand_side);
            }
        }
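        // Illustrative sketch (not from the original listing): in a forward-Euler style
        // explicit step the vector computed above would be used as
        //     dg->solution.add(dt, solution_update);   // u^{n+1} = u^n + dt * M^{-1} R(u^n)
        // Here no time step is actually taken; the loop only exercises residual assembly
        // and the inverse-mass application, which are the kernels being timed.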
        time_to_run[poly_degree] = dg->assemble_residual_time;
        time_to_run_mpi[poly_degree] = dealii::Utilities::MPI::sum(time_to_run[poly_degree], this->mpi_communicator);

        std::cout << "Poly Degree " << poly_degree
                  << " time to run local cpu " << std::fixed << std::setprecision(16)
                  << time_to_run[poly_degree] << std::endl;
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "Poly Degree " << poly_degree
              << " time to run Mpi " << std::fixed << std::setprecision(16)
              << time_to_run_mpi[poly_degree] << std::endl;
        myfile << poly_degree << " " << std::fixed << std::setprecision(16)
               << time_to_run_mpi[poly_degree] << std::endl;
    }
    double avg_slope = 0.0;
    pcout << "Times for one timestep" << std::endl;
    pcout << "local time | Slope | MPI sum time | Slope " << std::endl;
    for (unsigned int i = poly_degree_start + 1; i < poly_degree_end; i++) {
        const double slope_local = std::log(time_to_run[i] / time_to_run[i-1])
                                 / std::log((i + 1.0) / i);
        const double slope_mpi = std::log(time_to_run_mpi[i] / time_to_run_mpi[i-1])
                                 / std::log((i + 1.0) / i);
        pcout << time_to_run[i] << " " << slope_local << " "
              << time_to_run_mpi[i] << " " << slope_mpi << std::endl;
        if (i > poly_degree_end - 4) {
            avg_slope += slope_mpi;
        }
    }
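    // Explanatory note on the slope computed above: if one residual evaluation costs
    //     t(p) ~ C * (p + 1)^k,   with (p + 1) the number of 1D basis functions,
    // then for consecutive degrees i-1 and i,
    //     k = log( t(i) / t(i-1) ) / log( (i + 1) / i ),
    // which is the formula used, so each printed "Slope" estimates the exponent k.
    // A sum-factorized tensor-product DG operator is expected to cost O((p + 1)^(dim + 1))
    // per residual, whereas forming and applying dense elementwise operators costs
    // O((p + 1)^(2*dim)); the check below verifies the measured exponent stays near dim + 1.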
    // ... (avg_slope is presumably normalized by the number of accumulated slopes in lines omitted here)
    if (avg_slope - (dim + 1) > 0.8) {
        pcout << "Did not scale at dim + 1. Instead scaled at " << avg_slope << std::endl;
        // ... (test-failure handling omitted in this excerpt)
    }
    // ... (a large polynomial degree is selected and a new DG object "dg" is created here, omitted in this excerpt)
    pcout << "Checking that it does not run out of memory for poly degree " << poly_degree << std::endl;
    try {
        dg->allocate_system(false, false, false);

        std::shared_ptr<InitialConditionFunction<dim, nstate, double>> initial_condition_function =
            InitialConditionFactory<dim, nstate, double>::create_InitialConditionFunction(&all_parameters_new);
        SetInitialCondition<dim, nstate, double>::set_initial_condition(initial_condition_function, dg, &all_parameters_new);

        // ... (the ODE solver is created here via create_ODESolver(dg), omitted in this excerpt)
        ode_solver->current_iteration = 0;
        ode_solver->allocate_ode_system();
        MPI_Barrier(MPI_COMM_WORLD);
        pcout << "ODE solver successfully created. This verifies no memory jump from ODE Solver." << std::endl;

        dealii::LinearAlgebra::distributed::Vector<double> solution_update;
        solution_update.reinit(dg->locally_owned_dofs, dg->ghost_dofs, this->mpi_communicator);

        for (unsigned int i_step = 0; i_step < 10; i_step++) {
            dg->assemble_residual();
            if (all_parameters_new.use_inverse_mass_on_the_fly) {
                dg->apply_inverse_global_mass_matrix(dg->right_hand_side, solution_update);
            } else {
                dg->global_inverse_mass_matrix.vmult(solution_update, dg->right_hand_side);
            }
        }
    }
    catch (std::bad_alloc &e) {
        std::cout << "ending with bad_alloc (ran out of memory)" << std::endl;
        std::cout << "If the test fails here, then there is unnecessary memory being allocated." << std::endl;
        // ... (failure return omitted in this excerpt)
    }
Referenced symbols:

bool use_curvilinear_grid -- Flag to use curvilinear grid.
FlowSolverParam flow_solver_param -- Contains the parameters for simulation cases (flow solver test).
int run_test() const override -- Ensure that the kinetic energy is bounded.
const MPI_Comm mpi_communicator -- MPI communicator.
Files for the baseline physics.
unsigned int poly_degree -- Polynomial order (P) of the basis functions for DG.
Parameters::AllParameters -- Main parameter class that contains the various other sub-parameter classes.
EulerTaylorGreenScaling -- Euler Taylor Green Vortex Scaling Test.
std::string energy_file -- Energy file.
const Parameters::AllParameters *const all_parameters -- Pointer to all parameters.
static std::shared_ptr< DGBase< dim, real, MeshType > > create_discontinuous_galerkin(const Parameters::AllParameters *const parameters_input, const unsigned int degree, const unsigned int max_degree_input, const unsigned int grid_degree_input, const std::shared_ptr< Triangulation > triangulation_input) -- Creates a derived object DG, but returns it as DGBase.
int number_of_mesh_refinements -- Number of refinements for naca0012 and Gaussian bump based cases.
EulerTaylorGreenScaling(const Parameters::AllParameters *const parameters_input) -- Constructor.
bool use_inverse_mass_on_the_fly -- Flag to use inverse mass matrix on-the-fly for explicit solves.
dealii::ConditionalOStream pcout -- ConditionalOStream.
static std::shared_ptr< ODESolverBase< dim, real, MeshType > > create_ODESolver(std::shared_ptr< DGBase< dim, real, MeshType > > dg_input) -- Creates either implicit or explicit ODE solver based on parameter value (no POD basis given).
static void set_initial_condition(std::shared_ptr< InitialConditionFunction< dim, nstate, double > > initial_condition_function_input, std::shared_ptr< PHiLiP::DGBase< dim, real > > dg_input, const Parameters::AllParameters *const parameters_input) -- Applies the given initial condition function to the given dg object.
TestsBase -- Base class of all the tests.
int overintegration -- Number of additional quadrature points to use.
static std::shared_ptr< InitialConditionFunction< dim, nstate, real > > create_InitialConditionFunction(Parameters::AllParameters const *const param) -- Construct InitialConditionFunction object from global parameter file.
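For orientation, the sketch below chains the documented factory calls above in the order the test uses them. It is a minimal illustration, assuming the PHiLiP class names DGFactory, InitialConditionFactory, SetInitialCondition, and ODE::ODESolverFactory, and a hypothetical grid_degree variable; it is not a line-for-line extract of TGV_scaling.cpp.

    // Sketch only: factory class names and grid_degree are assumptions, not lines from this file.
    std::shared_ptr<DGBase<dim, double>> dg =
        DGFactory<dim, double>::create_discontinuous_galerkin(
            &all_parameters_new, poly_degree, poly_degree, grid_degree, grid);
    dg->allocate_system(false, false, false);

    std::shared_ptr<InitialConditionFunction<dim, nstate, double>> initial_condition =
        InitialConditionFactory<dim, nstate, double>::create_InitialConditionFunction(&all_parameters_new);
    SetInitialCondition<dim, nstate, double>::set_initial_condition(initial_condition, dg, &all_parameters_new);

    std::shared_ptr<ODE::ODESolverBase<dim, double>> ode_solver =
        ODE::ODESolverFactory<dim, double>::create_ODESolver(dg);
    ode_solver->allocate_ode_system();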