PHiLiP: Parallel High-Order Library for PDEs through hp-adaptive Discontinuous Galerkin methods
inner_vol_parameterization.cpp
1 #include "inner_vol_parameterization.hpp"
2 #include <deal.II/dofs/dof_tools.h>
3 #include <deal.II/lac/dynamic_sparsity_pattern.h>
4 #include <deal.II/lac/sparsity_tools.h>
5 
6 namespace PHiLiP {
7 
8 template<int dim>
10  std::shared_ptr<HighOrderGrid<dim,double>> _high_order_grid)
11  : BaseParameterization<dim>(_high_order_grid)
12 {
14 }
15 
16 template<int dim>
18 {
19  unsigned int n_vol_nodes = this->high_order_grid->volume_nodes.size();
20  unsigned int n_surf_nodes = this->high_order_grid->surface_nodes.size();
21  n_inner_nodes = n_vol_nodes - n_surf_nodes;
22 
23  dealii::LinearAlgebra::distributed::Vector<int> is_a_surface_node;
24  is_a_surface_node.reinit(this->high_order_grid->volume_nodes); // Copies parallel layout, without values. Initializes to 0 by default.
25 
26  // Get locally owned volume and surface ranges of indices held by current processor.
27  const dealii::IndexSet &volume_range = this->high_order_grid->volume_nodes.get_partitioner()->locally_owned_range();
28  const dealii::IndexSet &surface_range = this->high_order_grid->surface_nodes.get_partitioner()->locally_owned_range();
29 
30  // Set is_a_surface_node. Makes it easier to iterate over inner nodes later.
31  // Using surface_range.begin() and surface_range.end() might make it quicker to iterate over local ranges. To be checked later.
32  for(unsigned int i_surf = 0; i_surf<n_surf_nodes; ++i_surf)
33  {
34  if(!(surface_range.is_element(i_surf))) continue;
35 
36  const unsigned int vol_index = this->high_order_grid->surface_to_volume_indices(i_surf);
37  Assert(volume_range.is_element(vol_index),
38  dealii::ExcMessage("Surface index is in range, so vol index is expected to be in the range of this processor."));
39  is_a_surface_node(vol_index) = 1;
40  }
41  is_a_surface_node.update_ghost_values();
42 
43  //=========== Set inner_vol_range IndexSet of current processor ================================================================
44 
45  unsigned int n_elements_this_mpi = volume_range.n_elements() - surface_range.n_elements(); // Size of local indexset
46  std::vector<unsigned int> n_elements_per_mpi(this->n_mpi);
47  MPI_Allgather(&n_elements_this_mpi, 1, MPI_UNSIGNED, &(n_elements_per_mpi[0]), 1, MPI_UNSIGNED, this->mpi_communicator);
48 
49  // Set lower index and hgher index of locally owned IndexSet on each processor
50  unsigned int lower_index = 0, higher_index = 0;
51  for(int i_mpi = 0; i_mpi < this->mpi_rank; ++i_mpi)
52  {
53  lower_index += n_elements_per_mpi[i_mpi];
54  }
55  higher_index = lower_index + n_elements_this_mpi;
56 
58  inner_vol_range.add_range(lower_index, higher_index);
59 
60  //=========== Set inner_vol_index_to_vol_index ================================================================
61  inner_vol_index_to_vol_index.reinit(inner_vol_range, this->mpi_communicator); // No need of ghosts. To be verified later.
62 
63  unsigned int count1 = lower_index;
64  for(unsigned int i_vol = 0; i_vol < n_vol_nodes; ++i_vol)
65  {
66  if(!volume_range.is_element(i_vol)) continue;
67 
68  if(is_a_surface_node(i_vol)) continue;
69 
70  inner_vol_index_to_vol_index[count1++] = i_vol;
71  }
72  AssertDimension(count1, higher_index);
73 }
74 
75 template<int dim>
77 {
78  design_var.reinit(inner_vol_range, this->mpi_communicator);
79 
80  for(unsigned int i=0; i<n_inner_nodes; ++i)
81  {
82  if(inner_vol_range.is_element(i))
83  {
84  const unsigned int vol_index = inner_vol_index_to_vol_index[i];
85  design_var[i] = this->high_order_grid->volume_nodes[vol_index];
86  }
87  }
88  current_design_var = design_var;
89  design_var.update_ghost_values(); // Not required as there are no ghost values. Might be required later.
90  current_design_var.update_ghost_values();
91 }
92 
93 template<int dim>
95 {
96  const dealii::IndexSet &volume_range = this->high_order_grid->volume_nodes.get_partitioner()->locally_owned_range();
97  const unsigned int n_vol_nodes = this->high_order_grid->volume_nodes.size();
98 
99  dealii::DynamicSparsityPattern dsp(n_vol_nodes, n_inner_nodes, volume_range);
100  for(unsigned int i=0; i<n_inner_nodes; ++i)
101  {
102  if(! inner_vol_range.is_element(i)) continue;
103 
104  dsp.add(inner_vol_index_to_vol_index[i],i);
105  }
106 
107 
108  dealii::IndexSet locally_relevant_dofs;
109  dealii::DoFTools::extract_locally_relevant_dofs(this->high_order_grid->dof_handler_grid, locally_relevant_dofs);
110 
111  dealii::SparsityTools::distribute_sparsity_pattern(dsp, volume_range, this->mpi_communicator, locally_relevant_dofs);
112 
113  dXv_dXp.reinit(volume_range, inner_vol_range, dsp, this->mpi_communicator);
114 
115  for(unsigned int i=0; i<n_inner_nodes; ++i)
116  {
117  if(! inner_vol_range.is_element(i)) continue;
118 
119  dXv_dXp.set(inner_vol_index_to_vol_index[i], i, 1.0);
120  }
121 
122  dXv_dXp.compress(dealii::VectorOperation::insert);
123 }
124 
125 template<int dim>
127  const MatrixType &dXv_dXp,
128  const VectorType &design_var)
129 {
130  AssertDimension(dXv_dXp.n(), design_var.size());
131 
132  // check if design variables have changed.
133  bool design_variable_has_changed = this->has_design_variable_been_updated(current_design_var, design_var);
134  bool mesh_updated;
135  if(!(design_variable_has_changed))
136  {
137  mesh_updated = false;
138  return mesh_updated;
139  }
140  VectorType change_in_des_var = design_var;
141  change_in_des_var -= current_design_var;
142 
143  current_design_var = design_var;
144  dXv_dXp.vmult_add(this->high_order_grid->volume_nodes, change_in_des_var); // Xv = Xv + dXv_dXp*(Xp,new - Xp); Gives Xv for surface nodes and Xp,new for inner vol nodes.
145  mesh_updated = true;
146  return mesh_updated;
147 }
148 
149 template<int dim>
151 {
152  return n_inner_nodes;
153 }
154 
156 } // namespace PHiLiP
int n_mpi
Total no. of processors.
InnerVolParameterization(std::shared_ptr< HighOrderGrid< dim, double >> _high_order_grid)
Constructor.
int mpi_rank
Processor# of current processor.
void compute_dXv_dXp(MatrixType &dXv_dXp) const override
Computes the derivative of volume nodes w.r.t. inner volume nodes. Overrides the virtual function in ...
void compute_inner_vol_index_to_vol_index()
Computes inner_vol_range IndexSet on each processor, along with n_inner_nodes and inner_vol_index_to_...
bool has_design_variable_been_updated(const VectorType &current_design_var, const VectorType &updated_design_var) const
Checks if the design variable has changed.
dealii::IndexSet inner_vol_range
Local indices of inner volume nodes.
Files for the baseline physics.
Definition: ADTypes.hpp:10
dealii::TrilinosWrappers::SparseMatrix MatrixType
Alias for dealii::TrilinosWrappers::SparseMatrix.
void initialize_design_variables(VectorType &design_var) override
Initializes design variables with inner volume nodes and set locally owned and ghost indices...
VectorType current_design_var
Current design variable. Stored to prevent recomputing if it is unchanged.
Abstract class for design parameterization. Objective function and the constraints take this class's ...
unsigned int n_inner_nodes
No. of inner volume nodes.
MPI_Comm mpi_communicator
Alias for MPI_COMM_WORLD.
std::shared_ptr< HighOrderGrid< dim, double > > high_order_grid
Pointer to high order grid.
dealii::LinearAlgebra::distributed::Vector< int > inner_vol_index_to_vol_index
Converts inner volume index to global index of volume nodes.
unsigned int get_number_of_design_variables() const override
Returns the number of design variables (i.e. no. of inner volume nodes).
dealii::LinearAlgebra::distributed::Vector< double > VectorType
Alias for dealii's parallel distributed vector.
bool update_mesh_from_design_variables(const MatrixType &dXv_dXp, const VectorType &design_var) override
Checks if the design variables have changed and updates inner volume nodes of high order grid...
Design parameterization w.r.t. inner volume nodes (i.e. volume nodes excluding those on the boundary)...