PHiLiP: Parallel High-Order Library for PDEs
Parallel high-order library for PDEs through hp-adaptive discontinuous Galerkin methods
metric_to_mesh_generator.cpp
1 #include "metric_to_mesh_generator.h"
2 #include <deal.II/dofs/dof_renumbering.h>
3 #include <deal.II/distributed/shared_tria.h>
4 #include <deal.II/dofs/dof_tools.h>
5 #include <deal.II/lac/la_parallel_vector.h>
6 #include <ostream>
7 #include <fstream>
8 #include "grid_refinement/gmsh_out.h"
9 #include <cstdlib>
10 
11 namespace PHiLiP {
12 
template<int dim, int nstate, typename real, typename MeshType>
MetricToMeshGenerator<dim, nstate, real, MeshType>::MetricToMeshGenerator(
    std::shared_ptr<dealii::MappingFEField<dim,dim,VectorType,DoFHandlerType>> _volume_nodes_mapping,
    std::shared_ptr<MeshType> _triangulation)
    : volume_nodes_mapping(_volume_nodes_mapping)
    , triangulation(_triangulation)
    , dof_handler_vertices(*triangulation)
    , fe_q(1)
    , fe_system(dealii::FESystem<dim>(fe_q, 1)) // setting nstate = 1 to index vertices
    , mpi_communicator(MPI_COMM_WORLD)
    , pcout(std::cout, dealii::Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
    , filename("mesh_to_be_generated")
    , filename_pos(filename + ".pos")
    , filename_geo(filename + ".geo")
    , filename_msh(filename + ".msh")
{
    MPI_Comm_rank(mpi_communicator, &mpi_rank);
    MPI_Comm_size(mpi_communicator, &n_mpi);

    if(dim != 2)
    {
        pcout << "This class is currently hardcoded for dim = 2. To be updated later when required." << std::endl << std::flush;
        std::abort();
    }

    reinit();
}

template<int dim, int nstate, typename real, typename MeshType>
void MetricToMeshGenerator<dim, nstate, real, MeshType>::reinit()
{
    dof_handler_vertices.distribute_dofs(fe_system);
    dealii::DoFRenumbering::Cuthill_McKee(dof_handler_vertices);

    locally_owned_vertex_dofs = dof_handler_vertices.locally_owned_dofs();
    dealii::DoFTools::extract_locally_relevant_dofs(dof_handler_vertices, locally_relevant_vertex_dofs);
    // Ghost dofs = locally relevant dofs minus locally owned dofs.
    ghost_vertex_dofs = locally_relevant_vertex_dofs;
    ghost_vertex_dofs.subtract_set(locally_owned_vertex_dofs);

    const unsigned int n_vertices = dof_handler_vertices.n_dofs();

    for(unsigned int i=0; i<dim*dim; ++i)
    {
        optimal_metric_at_vertices[i].reinit(locally_owned_vertex_dofs, ghost_vertex_dofs, mpi_communicator);
    }

    all_vertices.clear();
    all_vertices.resize(n_vertices);
}

template<int dim, int nstate, typename real, typename MeshType>
void MetricToMeshGenerator<dim, nstate, real, MeshType>::interpolate_metric_to_vertices(
    const std::vector<dealii::Tensor<2, dim, real>> &cellwise_optimal_metric)
{
    reinit();
    AssertDimension(cellwise_optimal_metric.size(), triangulation->n_active_cells());

    dealii::Quadrature<dim> quadrature = fe_system.get_unit_support_points();
    dealii::FEValues<dim, dim> fe_values_vertices(*volume_nodes_mapping, fe_system, quadrature, dealii::update_quadrature_points);
    const unsigned int n_dofs_cell = fe_system.dofs_per_cell;
    std::vector<dealii::types::global_dof_index> dof_indices(n_dofs_cell);
    AssertDimension(n_dofs_cell, dealii::GeometryInfo<dim>::vertices_per_cell);

    // Store the vertices of each cell by their global dof.
    for(const auto &cell : dof_handler_vertices.active_cell_iterators())
    {
        if(!cell->is_locally_owned()) {continue;}

        fe_values_vertices.reinit(cell);
        cell->get_dof_indices(dof_indices);

        const std::vector<dealii::Point<dim>> &cell_vertices = fe_values_vertices.get_quadrature_points();

        for(unsigned int idof = 0; idof < cell_vertices.size(); ++idof)
        {
            const unsigned int iquad = idof;
            all_vertices[dof_indices[idof]] = cell_vertices[iquad];
        }

        AssertDimension(cell_vertices.size(), dealii::GeometryInfo<dim>::vertices_per_cell);

        // Check that the vertices are in the same order.
        for(unsigned int idof = 0; idof < cell_vertices.size(); ++idof)
        {
            const dealii::Point<dim> &ref_vertex = dealii::GeometryInfo<dim>::unit_cell_vertex(idof);
            const dealii::Point<dim> expected_vertex = volume_nodes_mapping->transform_unit_to_real_cell(cell, ref_vertex);
            const real error_in_vertex = expected_vertex.distance(all_vertices[dof_indices[idof]]);
            if(error_in_vertex > 1.0e-10)
            {
                std::cout << "Expected vertex = " << expected_vertex << " " << "Actual vertex = " << all_vertices[dof_indices[idof]] << std::endl;
                std::cout << "Error in ordering of vertices. Aborting.." << std::flush;
                std::abort();
            }
        }
    } // cell loop ends

    //================================ Interpolate metric field ================================================
    // Using Eq. 2.56 in Dolejší, Vít, and Georg May (2022). Anisotropic hp-Mesh Adaptation Methods: Theory, Implementation and Applications.
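    // The optimal metric at a vertex is the arithmetic mean of the cellwise metrics of
    // all cells sharing that vertex: each locally owned cell adds its metric to each of
    // its vertices, contributions are summed across processors, and the sum is divided
    // by the number of contributing cells.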
    dealii::LinearAlgebra::distributed::Vector<int> metric_count_at_vertices(locally_owned_vertex_dofs, ghost_vertex_dofs, mpi_communicator);
    metric_count_at_vertices = 0;

    for(const auto &cell : dof_handler_vertices.active_cell_iterators())
    {
        if(!cell->is_locally_owned()) {continue;}

        cell->get_dof_indices(dof_indices);
        const unsigned int cell_index = cell->active_cell_index();

        for(unsigned int idof = 0; idof < n_dofs_cell; ++idof)
        {
            const unsigned int idof_global = dof_indices[idof];
            metric_count_at_vertices(idof_global) += 1;
            for(unsigned int i=0; i<dim; ++i)
            {
                for(unsigned int j=0; j<dim; ++j)
                {
                    optimal_metric_at_vertices[i*dim + j](idof_global) += cellwise_optimal_metric[cell_index][i][j];
                }
            }
        }
    } // cell loop ends

    // Add contributions from all processors at shared global dofs.
    metric_count_at_vertices.compress(dealii::VectorOperation::add);
    metric_count_at_vertices.update_ghost_values();
    for(unsigned int i=0; i<dim*dim; ++i)
    {
        optimal_metric_at_vertices[i].compress(dealii::VectorOperation::add);
        optimal_metric_at_vertices[i].update_ghost_values();
    }

    // Compute the average.
    for(auto idof_global = locally_owned_vertex_dofs.begin(); idof_global != locally_owned_vertex_dofs.end(); ++idof_global)
    {
        for(unsigned int i=0; i<dim*dim; ++i)
        {
            optimal_metric_at_vertices[i](*idof_global) /= metric_count_at_vertices(*idof_global);
        }
    }

    for(unsigned int i=0; i<dim*dim; ++i)
    {
        optimal_metric_at_vertices[i].update_ghost_values();
    }
}

template<int dim, int nstate, typename real, typename MeshType>
void MetricToMeshGenerator<dim, nstate, real, MeshType>::write_pos_file() const
{
    AssertDimension(fe_system.dofs_per_cell, dealii::GeometryInfo<dim>::vertices_per_cell);

    // Based on gmsh/tutorials/t17_bgmesh.pos in GMSH 4.11.1.
    // Adapted from GridRefinement::Gmsh_Out::write_pos_anisotropic() and modified to use the metric field at nodes.
    // Might need to figure out a way to link GridRefinement::Gmsh_Out and this class in the future.
    const std::string quotes = "\"";
    const std::string filename_this_processor = filename + std::to_string(mpi_rank) + ".pos";
    std::ofstream outfile(filename_this_processor);

    // deal.II's default quad numbering:
    // 2 * * 3
    // *     *
    // *     *
    // 0 * * 1

    // Split each quad into 2 triangles, [0,2,3] and [0,3,1].

    // Write the .pos file for each triangle using the Tensor Triangle (TT) structure:
    // TT(x1,y1,z1,x2,y2,z2,x3,y3,z3){M1, M2, M3};
    // where Mi = m11, m12, m13, m21, m22, m23, m31, m32, m33;
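    // For example (hypothetical values), a triangle with vertices (0,0), (1,0), (0,1)
    // whose three vertices all carry the 2D metric [[100, 0], [0, 25]], padded to 3x3
    // with a unit z-entry, would be written as:
    // TT(0,0,0,1,0,0,0,1,0){100,0,0,0,25,0,0,0,1,100,0,0,0,25,0,0,0,1,100,0,0,0,25,0,0,0,1};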
    std::vector<dealii::types::global_dof_index> dof_indices(fe_system.dofs_per_cell);

    for(const auto &cell : dof_handler_vertices.active_cell_iterators())
    {
        if(!cell->is_locally_owned()) {continue;}
        cell->get_dof_indices(dof_indices);

        for(const auto &tri :
            std::array<std::array<int,3>,2>{{
                {{0,2,3}},
                {{0,3,1}} }} )
        {
            outfile << "TT(";
            // Write vertices
            for(unsigned int i = 0; i < tri.size(); ++i)
            {
                if(i != 0) {outfile << ",";}

                const unsigned int idof = tri[i];
                const unsigned int idof_global = dof_indices[idof];
                const dealii::Point<dim> &pos = all_vertices[idof_global];
                // x
                if(dim >= 1) {outfile << pos[0] << ",";}
                else         {outfile << 0 << ",";}
                // y
                if(dim >= 2) {outfile << pos[1] << ",";}
                else         {outfile << 0 << ",";}
                // z
                if(dim >= 3) {outfile << pos[2];}
                else         {outfile << 0;}
            }
            outfile << "){";
            // Write metric
            bool flag = false;

            for(unsigned int i = 0; i < tri.size(); ++i)
            {
                const unsigned int idof = tri[i];
                const unsigned int idof_global = dof_indices[idof];

                dealii::Tensor<2,dim,real> vertex_metric;
                for(unsigned int idim = 0; idim < dim; ++idim)
                {
                    for(unsigned int jdim = 0; jdim < dim; ++jdim)
                    {
                        vertex_metric[idim][jdim] = optimal_metric_at_vertices[idim*dim + jdim][idof_global];
                    }
                }

                for(unsigned int idim = 0; idim < 3; ++idim)
                {
                    for(unsigned int jdim = 0; jdim < 3; ++jdim)
                    {
                        if(flag) {outfile << ",";}
                        else     {flag = true;}

                        if( (idim < dim) && (jdim < dim) )
                        {
                            outfile << vertex_metric[idim][jdim];
                        }
                        else
                        {
                            // Output 1 along the diagonal, 0 elsewhere.
                            if(idim == jdim) {outfile << 1.0;}
                            else             {outfile << 0;}
                        }
                    }
                }
            }

            outfile << "};" << '\n';

        } // loop over the two triangles ends
    } // cell loop ends
    outfile << std::flush;
    outfile.close();
    MPI_Barrier(mpi_communicator);

    // Now that all processors have written their files, processor #0 can recombine them into a single file.
    if(mpi_rank == 0)
    {
        std::ofstream outfile0(filename_pos);

        outfile0 << "// Background mesh containing optimal metric field at nodes." << '\n';
        outfile0 << "View " << quotes << "nodalMetric" << quotes << " {" << '\n';

        for(int iproc = 0; iproc < n_mpi; ++iproc)
        {
            const std::string input_filename = filename + std::to_string(iproc) + ".pos";
            std::ifstream inputfile(input_filename);
            outfile0 << inputfile.rdbuf();
        }

        outfile0 << "};" << '\n';
        outfile0.close();
    }

    // Wait for processor #0 to finish.
    MPI_Barrier(mpi_communicator);
}

template<int dim, int nstate, typename real, typename MeshType>
void MetricToMeshGenerator<dim, nstate, real, MeshType>::write_geo_file() const
{
    // Based on gmsh/tutorials/t17.geo in GMSH 4.11.1.
    std::ofstream outfile(filename_geo);
    // Header
    outfile << " // GEO file " << '\n' << '\n';
    const std::string quotes = "\"";

    outfile << "SetFactory(" << quotes << "OpenCASCADE" << quotes << ");" << '\n' << '\n';

    // Geo file of the rectangular domain.

    // Merge the .pos view.
    outfile << "Merge " << quotes << filename_pos << quotes << ";" << '\n';

    // Apply the view as the current background mesh.
    outfile << "Background Mesh View[0];" << '\n';

    // Use BAMG (Algorithm 7) to generate the mesh.
    outfile << "Mesh.SmoothRatio = 0;" << '\n'; // No smoothing for now.
    outfile << "Mesh.AnisoMax = 1e30;" << '\n';
    outfile << "Mesh.Algorithm = 7;" << '\n';

    // Recombine triangles into quads.
    outfile << "Mesh.RecombinationAlgorithm = 2;" << '\n';
    outfile << "Mesh.RecombineAll = 1;" << '\n';
    outfile << std::flush;
    outfile.close();
}

template<int dim, int nstate, typename real, typename MeshType>
void MetricToMeshGenerator<dim, nstate, real, MeshType>::generate_mesh_from_cellwise_metric(
    const std::vector<dealii::Tensor<2, dim, real>> &cellwise_optimal_metric)
{
    interpolate_metric_to_vertices(cellwise_optimal_metric);
    write_pos_file();
    if(mpi_rank == 0)
    {
        write_geo_file();
        // Command run via std::system:
        // For 2D: gmsh -2 filename.geo -o filename.msh
        // For 3D: gmsh -3 -algo mmg3d filename.geo -o filename.msh
        std::string command_str = "gmsh -2 " + filename_geo + " -o " + filename_msh;
        int val = std::system(command_str.c_str());
        (void) val;
        pcout << "Generated new mesh." << std::endl;
    }
    MPI_Barrier(mpi_communicator);
}

template<int dim, int nstate, typename real, typename MeshType>
std::string MetricToMeshGenerator<dim, nstate, real, MeshType>::get_generated_mesh_filename() const
{
    return filename_msh;
}


// Instantiations
#if PHILIP_DIM!=1
#endif
} // PHiLiP namespace
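
For context, a minimal usage sketch follows. It is not part of this file: the helper name generate_adapted_mesh is hypothetical, and VectorType/DoFHandlerType are assumed to be the aliases used in metric_to_mesh_generator.h. The mapping, triangulation, and cellwise metric are assumed to come from the caller (e.g. a high-order grid).

#include "metric_to_mesh_generator.h"

// Hypothetical helper illustrating the intended call sequence of MetricToMeshGenerator.
template<int dim, int nstate, typename MeshType>
std::string generate_adapted_mesh(
    std::shared_ptr<dealii::MappingFEField<dim, dim, VectorType, DoFHandlerType>> mapping,
    std::shared_ptr<MeshType> triangulation,
    const std::vector<dealii::Tensor<2, dim, double>> &cellwise_optimal_metric)
{
    // One dim x dim optimal metric tensor per active cell is expected.
    PHiLiP::MetricToMeshGenerator<dim, nstate, double, MeshType> generator(mapping, triangulation);

    // Interpolates the metric to vertices, writes the .pos/.geo files, and calls gmsh.
    generator.generate_mesh_from_cellwise_metric(cellwise_optimal_metric);

    // Name of the .msh file written by gmsh; it can be read back with dealii::GridIn.
    return generator.get_generated_mesh_filename();
}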