1441 *
const double g = 9.81;
1442 *
const double rho = 7700;
1445 * values(dim - 1) = -rho * g;
1450 *
template <
int dim>
1451 *
void BodyForce<dim>::vector_value_list(
1455 *
const unsigned int n_points = points.size();
1459 *
for (
unsigned int p = 0; p < n_points; ++p)
1460 * BodyForce<dim>::vector_value(points[p], value_list[p]);
1468 * <a name=
"step_18-ThecodeIncrementalBoundaryValuecodeclass"></a>
1469 * <h3>The <code>IncrementalBoundaryValues</code>
class</h3>
1473 * In addition to body forces, movement can be induced by boundary forces
1474 * and forced boundary displacement. The latter
case is equivalent to forces
1475 * being chosen in such a way that they induce certain displacement.
1479 * For quasistatic displacement, typical boundary forces would be pressure
1480 * on a body, or tangential friction against another body. We chose a
1481 * somewhat simpler
case here: we prescribe a certain movement of (parts of)
1482 * the boundary, or at least of certain components of the displacement
1483 * vector. We describe
this by another vector-valued function that,
for a
1484 * given
point on the boundary, returns the prescribed displacement.
1488 * Since we have a time-dependent problem, the displacement increment of the
1489 * boundary equals the displacement accumulated during the length of the
1490 * timestep. The
class therefore has to know both the present time and the
1491 * length of the present time step, and can then
approximate the incremental
1492 * displacement as the present velocity times the present timestep.
1496 * For the purposes of
this program, we choose a simple form of boundary
1497 * displacement: we displace the top boundary with
constant velocity
1498 * downwards. The rest of the boundary is either going to be fixed (and is
1499 * then described
using an
object of type
1501 * nothing special has to be done). The implementation of the
class
1502 * describing the
constant downward motion should then be obvious
using the
1503 * knowledge we gained through all the previous example programs:
1506 *
template <
int dim>
1507 *
class IncrementalBoundaryValues :
public Function<dim>
1510 * IncrementalBoundaryValues(
const double present_time,
1511 *
const double present_timestep);
1514 * Vector<double> &values)
const override;
1518 * std::vector<Vector<double>> &value_list)
const override;
1521 *
const double velocity;
1522 *
const double present_time;
1523 *
const double present_timestep;
1527 *
template <
int dim>
1528 * IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
1529 *
const double present_time,
1530 *
const double present_timestep)
1533 * , present_time(present_time)
1534 * , present_timestep(present_timestep)
1538 *
template <
int dim>
1540 * IncrementalBoundaryValues<dim>::vector_value(
const Point<dim> & ,
1546 * values(2) = -present_timestep * velocity;
1551 *
template <
int dim>
1552 *
void IncrementalBoundaryValues<dim>::vector_value_list(
1556 *
const unsigned int n_points = points.size();
1560 *
for (
unsigned int p = 0; p < n_points; ++p)
1561 * IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
1569 * <a name=
"step_18-ImplementationofthecodeTopLevelcodeclass"></a>
1570 * <h3>Implementation of the <code>TopLevel</code>
class</h3>
1574 * Now
for the implementation of the main
class. First, we initialize the
1575 * stress-strain tensor, which we have declared as a
static const
1576 * variable. We chose Lamé constants that are appropriate
for steel:
1579 *
template <
int dim>
1581 * get_stress_strain_tensor<dim>( 9.695e10,
1589 * <a name=
"step_18-Thepublicinterface"></a>
1590 * <h4>The
public interface</h4>
1594 * The next step is the definition of constructors and destructors. There
1595 * are no surprises here: we choose linear and continuous finite elements
1596 *
for each of the <code>dim</code> vector components of the solution, and a
1597 * Gaussian quadrature formula with 2 points in each coordinate
1598 * direction. The destructor should be obvious:
1601 *
template <
int dim>
1602 * TopLevel<dim>::TopLevel()
1603 * : triangulation(MPI_COMM_WORLD)
1605 * , dof_handler(triangulation)
1606 * , quadrature_formula(fe.degree + 1)
1607 * , present_time(0.0)
1608 * , present_timestep(1.0)
1611 * , mpi_communicator(MPI_COMM_WORLD)
1614 * , pcout(std::cout, this_mpi_process == 0)
1619 *
template <
int dim>
1620 * TopLevel<dim>::~TopLevel()
1622 * dof_handler.clear();
1629 * The last of the
public functions is the
one that directs all the work,
1630 * <code>
run()</code>. It initializes the variables that describe where in
1631 * time we presently are, then runs the first time step, then loops over all
1632 * the other time steps. Note that
for simplicity we use a fixed time step,
1633 * whereas a more sophisticated program would of course have to choose it in
1634 * some more reasonable way adaptively:
1637 *
template <
int dim>
1638 *
void TopLevel<dim>::run()
1640 * do_initial_timestep();
1642 *
while (present_time < end_time)
1650 * <a name=
"step_18-TopLevelcreate_coarse_grid"></a>
1651 * <h4>TopLevel::create_coarse_grid</h4>
1655 * The next function in the order in which they were declared above is the
1656 *
one that creates the coarse grid from which we start. For
this example
1657 * program, we want to compute the deformation of a
cylinder under axial
1658 * compression. The first step therefore is to generate a mesh
for a
1659 *
cylinder of length 3 and with inner and outer radii of 0.8 and 1,
1660 * respectively. Fortunately, there is a library function
for such a mesh.
1664 * In a second step, we have to associate boundary conditions with the
1665 * upper and lower faces of the
cylinder. We choose a boundary indicator of
1666 * 0
for the boundary faces that are characterized by their midpoints having
1667 * z-coordinates of either 0 (bottom face), an indicator of 1
for z=3 (top
1668 * face);
finally, we use boundary indicator 2
for all faces on the
inside
1672 *
template <
int dim>
1673 *
void TopLevel<dim>::create_coarse_grid()
1675 *
const double inner_radius = 0.8, outer_radius = 1;
1677 *
for (
const auto &cell : triangulation.active_cell_iterators())
1678 *
for (
const auto &face : cell->face_iterators())
1679 *
if (face->at_boundary())
1681 *
const Point<dim> face_center = face->center();
1683 *
if (face_center[2] == 0)
1684 * face->set_boundary_id(0);
1685 *
else if (face_center[2] == 3)
1686 * face->set_boundary_id(1);
1687 *
else if (
std::sqrt(face_center[0] * face_center[0] +
1688 * face_center[1] * face_center[1]) <
1689 * (inner_radius + outer_radius) / 2)
1690 * face->set_boundary_id(2);
1692 * face->set_boundary_id(3);
1697 * Once all
this is done, we can
refine the mesh once globally:
1700 * triangulation.refine_global(1);
1704 * As the
final step, we need to set up a clean state of the data that we
1705 * store in the quadrature points on all cells that are treated on the
1706 * present processor.
1709 * setup_quadrature_point_history();
1717 * <a name=
"step_18-TopLevelsetup_system"></a>
1718 * <h4>TopLevel::setup_system</h4>
1722 * The next function is the
one that sets up the data structures
for a given
1723 * mesh. This is done in mostly the same way as in @ref step_17
"step-17": distribute the
1724 * degrees of freedom, then sort these degrees of freedom in such a way that
1725 * each processor gets a
contiguous chunk of them. Note that the subdivision into
1726 * chunks
for each processor is handled in the
functions that create or
1727 *
refine grids, unlike in the previous example program (the point where
1728 *
this happens is mostly a matter of taste; here, we chose to
do it when
1729 * grids are created since in the <code>do_initial_timestep</code> and
1730 * <code>do_timestep</code>
functions we want to output the number of cells
1731 * on each processor at a
point where we haven
't called the present function
1735 * template <int dim>
1736 * void TopLevel<dim>::setup_system()
1738 * dof_handler.distribute_dofs(fe);
1739 * locally_owned_dofs = dof_handler.locally_owned_dofs();
1740 * locally_relevant_dofs =
1741 * DoFTools::extract_locally_relevant_dofs(dof_handler);
1745 * The next step is to set up constraints due to hanging nodes. This has
1746 * been handled many times before:
1749 * hanging_node_constraints.clear();
1750 * DoFTools::make_hanging_node_constraints(dof_handler,
1751 * hanging_node_constraints);
1752 * hanging_node_constraints.close();
1756 * And then we have to set up the matrix. Here we deviate from @ref step_17 "step-17", in
1757 * which we simply used PETSc's ability to just know about the size of the
1758 *
matrix and later allocate those
nonzero elements that are being written
1759 * to. While
this works just fine from a correctness viewpoint, it is not
1760 * at all efficient:
if we don
't give PETSc a clue as to which elements
1761 * are written to, it is (at least at the time of this writing) unbearably
1762 * slow when we set the elements in the matrix for the first time (i.e. in
1763 * the first timestep). Later on, when the elements have been allocated,
1764 * everything is much faster. In experiments we made, the first timestep
1765 * can be accelerated by almost two orders of magnitude if we instruct
1766 * PETSc which elements will be used and which are not.
1770 * To do so, we first generate the sparsity pattern of the matrix we are
1771 * going to work with, and make sure that the condensation of hanging node
1772 * constraints adds the necessary additional entries in the sparsity
1776 * DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
1777 * DoFTools::make_sparsity_pattern(dof_handler,
1779 * hanging_node_constraints,
1780 * /*keep constrained dofs*/ false);
1781 * SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
1782 * locally_owned_dofs,
1784 * locally_relevant_dofs);
1787 * Note that we have used the <code>DynamicSparsityPattern</code> class
1788 * here that was already introduced in @ref step_11 "step-11", rather than the
1789 * <code>SparsityPattern</code> class that we have used in all other
1790 * cases. The reason for this is that for the latter class to work we have
1791 * to give an initial upper bound for the number of entries in each row, a
1792 * task that is traditionally done by
1793 * <code>DoFHandler::max_couplings_between_dofs()</code>. However, this
1794 * function suffers from a serious problem: it has to compute an upper
1795 * bound to the number of nonzero entries in each row, and this is a
1796 * rather complicated task, in particular in 3d. In effect, while it is
1797 * quite accurate in 2d, it often comes up with much too large a number in
1798 * 3d, and in that case the <code>SparsityPattern</code> allocates much
1799 * too much memory at first, often several 100 MBs. This is later
1800 * corrected when <code>DoFTools::make_sparsity_pattern</code> is called
1801 * and we realize that we don't need all that much memory, but by that time it
1802 * is already too late:
for large problems, the temporary allocation of
1803 * too much memory can lead to out-of-memory situations.
1807 * In order to avoid
this, we resort to the
1809 * not require any up-front estimate on the number of
nonzero entries per
1810 * row. It therefore only ever allocates as much memory as it needs at any
1811 * given time, and we can build it even
for large 3
d problems.
1815 * It is also worth noting that due to the specifics of
1817 * global, i.e. comprises all degrees of freedom whether they will be
1818 * owned by the processor we are on or another
one (in
case this program
1819 * is run in %
parallel via
MPI). This of course is not optimal -- it
1820 * limits the size of the problems we can solve, since storing the entire
1821 * sparsity pattern (even
if only
for a
short time) on each processor does
1822 * not
scale well. However, there are several more places in the program
1823 * in which we
do this,
for example we
always keep the global
1824 * triangulation and DoF handler objects around, even
if we only work on
1825 * part of them. At present, deal.II does not have the necessary
1826 * facilities to completely distribute these objects (a task that, indeed,
1827 * is very hard to achieve with adaptive meshes, since well-balanced
1828 * subdivisions of a domain tend to become unbalanced as the mesh is
1829 * adaptively refined).
1833 * With
this data structure, we can then go to the
PETSc sparse
matrix and
1834 * tell it to preallocate all the entries we will later want to write to:
1837 * system_matrix.reinit(locally_owned_dofs,
1838 * locally_owned_dofs,
1840 * mpi_communicator);
1843 * After
this point, no further
explicit knowledge of the sparsity pattern
1844 * is required any more and we can let the <code>sparsity_pattern</code>
1845 * variable go out of scope without any problem.
1849 * The last task in
this function is then only to reset the right hand
1850 * side vector as well as the solution vector to its correct size;
1851 * remember that the solution vector is a local
one, unlike the right hand
1852 * side that is a distributed %
parallel one and therefore needs to know
1853 * the
MPI communicator over which it is supposed to transmit messages:
1856 * system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1857 * incremental_displacement.reinit(dof_handler.n_dofs());
1865 * <a name=
"step_18-TopLevelassemble_system"></a>
1866 * <h4>TopLevel::assemble_system</h4>
1870 * Again, assembling the system
matrix and right hand side follows the same
1871 * structure as in many example programs before. In particular, it is mostly
1872 * equivalent to @ref step_17
"step-17", except
for the different right hand side that now
1873 * only has to take into account
internal stresses. In addition, assembling
1874 * the
matrix is made significantly more transparent by
using the
1875 * <code>
SymmetricTensor</code>
class: note the elegance of forming the
1876 *
scalar products of
symmetric tensors of rank 2 and 4. The implementation
1878 * may not be
using an isotropic elasticity tensor.
1882 * The first part of the assembly routine is as
always:
1885 *
template <
int dim>
1886 *
void TopLevel<dim>::assemble_system()
1889 * system_matrix = 0;
1892 * quadrature_formula,
1896 *
const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1897 *
const unsigned int n_q_points = quadrature_formula.size();
1902 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1904 * BodyForce<dim> body_force;
1905 * std::vector<Vector<double>> body_force_values(n_q_points,
1910 * As in @ref step_17
"step-17", we only need to
loop over all cells that belong to the
1911 * present processor:
1914 *
for (
const auto &cell : dof_handler.active_cell_iterators())
1915 *
if (cell->is_locally_owned())
1920 * fe_values.reinit(cell);
1924 * Then
loop over all indices i,j and quadrature points and
assemble
1925 * the system
matrix contributions from
this cell. Note how we
1927 * at a given quadrature
point from the <code>
FEValues</code>
1928 * object, and the elegance with which we form the triple
1929 * contraction <code>eps_phi_i :
C : eps_phi_j</code>; the latter
1930 * needs to be compared to the clumsy computations needed in
1931 * @ref step_17
"step-17", both in the introduction as well as in the respective
1932 * place in the program:
1935 *
for (
unsigned int i = 0; i < dofs_per_cell; ++i)
1936 *
for (
unsigned int j = 0; j < dofs_per_cell; ++j)
1937 *
for (
unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1940 * eps_phi_i = get_strain(fe_values, i, q_point),
1941 * eps_phi_j = get_strain(fe_values, j, q_point);
1944 * stress_strain_tensor *
1947 * fe_values.JxW(q_point);
1953 * Then also
assemble the local right hand side contributions. For
1954 *
this, we need to access the prior stress
value in
this quadrature
1955 *
point. To get it, we use the user pointer of
this cell that
1956 * points into the global array to the quadrature
point data
1957 * corresponding to the first quadrature
point of the present cell,
1958 * and then add an offset corresponding to the
index of the
1959 * quadrature
point we presently consider:
1962 *
const PointHistory<dim> *local_quadrature_points_data =
1963 *
reinterpret_cast<PointHistory<dim> *
>(cell->user_pointer());
1966 * In addition, we need the values of the external body forces at
1967 * the quadrature points on
this cell:
1970 * body_force.vector_value_list(fe_values.get_quadrature_points(),
1971 * body_force_values);
1974 * Then we can
loop over all degrees of freedom on
this cell and
1975 * compute local contributions to the right hand side:
1978 *
for (
unsigned int i = 0; i < dofs_per_cell; ++i)
1980 *
const unsigned int component_i =
1981 * fe.system_to_component_index(i).first;
1983 *
for (
unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1986 * local_quadrature_points_data[q_point].old_stress;
1989 * (body_force_values[q_point](component_i) *
1990 * fe_values.shape_value(i, q_point) -
1991 * old_stress * get_strain(fe_values, i, q_point)) *
1992 * fe_values.JxW(q_point);
1998 * Now that we have the local contributions to the linear system, we
1999 * need to transfer it into the global objects. This is done exactly
2000 * as in @ref step_17
"step-17":
2003 * cell->get_dof_indices(local_dof_indices);
2005 * hanging_node_constraints.distribute_local_to_global(cell_matrix,
2007 * local_dof_indices,
2023 * The last step is to again fix up boundary values, just as we already
2024 * did in previous programs.
A slight complication is that the
2026 * vector compatible with the
matrix and right hand side (i.e. here a
2027 * distributed %
parallel vector, rather than the sequential vector we use
2028 * in
this program) in order to preset the entries of the solution vector
2029 * with the correct boundary values. We provide such a compatible vector
2030 * in the form of a temporary vector which we then
copy into the
2035 * We make up
for this complication by showing how boundary values can be
2036 * used flexibly: following the way we create the triangulation, there are
2037 * three distinct boundary indicators used to describe the domain,
2038 * corresponding to the bottom and top faces, as well as the inner/outer
2039 * surfaces. We would like to impose boundary conditions of the following
2040 * type: The inner and outer
cylinder surfaces are free of external
2041 * forces, a fact that corresponds to natural (Neumann-type) boundary
2042 * conditions
for which we don
't have to do anything. At the bottom, we
2043 * want no movement at all, corresponding to the cylinder being clamped or
2044 * cemented in at this part of the boundary. At the top, however, we want
2045 * a prescribed vertical downward motion compressing the cylinder; in
2046 * addition, we only want to restrict the vertical movement, but not the
2047 * horizontal ones -- one can think of this situation as a well-greased
2048 * plate sitting on top of the cylinder pushing it downwards: the atoms of
2049 * the cylinder are forced to move downward, but they are free to slide
2050 * horizontally along the plate.
2054 * The way to describe this is as follows: for boundary indicator zero
2055 * (bottom face) we use a dim-dimensional zero function representing no
2056 * motion in any coordinate direction. For the boundary with indicator 1
2057 * (top surface), we use the <code>IncrementalBoundaryValues</code> class,
2058 * but we specify an additional argument to the
2059 * <code>VectorTools::interpolate_boundary_values</code> function denoting
2060 * which vector components it should apply to; this is a vector of bools
2061 * for each vector component and because we only want to restrict vertical
2062 * motion, it has only its last component set:
2065 * const FEValuesExtractors::Scalar z_component(dim - 1);
2066 * std::map<types::global_dof_index, double> boundary_values;
2067 * VectorTools::interpolate_boundary_values(dof_handler,
2069 * Functions::ZeroFunction<dim>(dim),
2071 * VectorTools::interpolate_boundary_values(
2074 * IncrementalBoundaryValues<dim>(present_time, present_timestep),
2076 * fe.component_mask(z_component));
2078 * PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
2079 * MatrixTools::apply_boundary_values(
2080 * boundary_values, system_matrix, tmp, system_rhs, false);
2081 * incremental_displacement = tmp;
2089 * <a name="step_18-TopLevelsolve_timestep"></a>
2090 * <h4>TopLevel::solve_timestep</h4>
2094 * The next function is the one that controls what all has to happen within
2095 * a timestep. The order of things should be relatively self-explanatory
2096 * from the function names:
2099 * template <int dim>
2100 * void TopLevel<dim>::solve_timestep()
2102 * pcout << " Assembling system..." << std::flush;
2103 * assemble_system();
2104 * pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;
2106 * const unsigned int n_iterations = solve_linear_problem();
2108 * pcout << " Solver converged in " << n_iterations << " iterations."
2111 * pcout << " Updating quadrature point data..." << std::flush;
2112 * update_quadrature_point_history();
2113 * pcout << std::endl;
2121 * <a name="step_18-TopLevelsolve_linear_problem"></a>
2122 * <h4>TopLevel::solve_linear_problem</h4>
2126 * Solving the linear system again works mostly as before. The only
2127 * difference is that we want to only keep a complete local copy of the
2128 * solution vector instead of the distributed one that we get as output from
2129 * PETSc's solver routines. To
this end, we declare a local temporary
2130 * variable
for the distributed vector and initialize it with the contents
2131 * of the local variable (remember that the
2132 * <code>apply_boundary_values</code> function called in
2133 * <code>assemble_system</code> preset the values of boundary nodes in
this
2134 * vector), solve with it, and at the
end of the function
copy it again into
2135 * the complete local vector that we declared as a member variable. Hanging
2136 * node constraints are then distributed only on the local
copy,
2137 * i.e. independently of each other on each of the processors:
2140 *
template <
int dim>
2141 *
unsigned int TopLevel<dim>::solve_linear_problem()
2144 * locally_owned_dofs, mpi_communicator);
2145 * distributed_incremental_displacement = incremental_displacement;
2148 * 1e-16 * system_rhs.l2_norm());
2154 * cg.solve(system_matrix,
2155 * distributed_incremental_displacement,
2159 * incremental_displacement = distributed_incremental_displacement;
2161 * hanging_node_constraints.distribute(incremental_displacement);
2163 *
return solver_control.last_step();
2171 * <a name=
"step_18-TopLeveloutput_results"></a>
2172 * <h4>TopLevel::output_results</h4>
2176 * This function generates the graphical output in .vtu format as explained
2177 * in the introduction. Each process will only work on the cells it owns,
2178 * and then write the result into a file of its own. Additionally, processor
2179 * 0 will write the record files that reference all the .vtu files.
2183 * The crucial part of
this function is to give the <code>
DataOut</code>
2184 *
class a way to only work on the cells that the present process owns.
2190 *
template <
int dim>
2191 *
void TopLevel<dim>::output_results() const
2198 * Then, just as in @ref step_17
"step-17", define the names of solution variables (which
2199 * here are the displacement increments) and queue the solution vector
for
2200 * output. Note in the following
switch how we make sure that
if the space
2201 *
dimension should be unhandled that we
throw an exception saying that we
2202 * haven
't implemented this case yet (another case of defensive
2206 * std::vector<std::string> solution_names;
2210 * solution_names.emplace_back("delta_x");
2213 * solution_names.emplace_back("delta_x");
2214 * solution_names.emplace_back("delta_y");
2217 * solution_names.emplace_back("delta_x");
2218 * solution_names.emplace_back("delta_y");
2219 * solution_names.emplace_back("delta_z");
2222 * DEAL_II_NOT_IMPLEMENTED();
2225 * data_out.add_data_vector(incremental_displacement, solution_names);
2230 * The next thing is that we wanted to output something like the average
2231 * norm of the stresses that we have stored in each cell. This may seem
2232 * complicated, since on the present processor we only store the stresses
2233 * in quadrature points on those cells that actually belong to the present
2234 * process. In other words, it seems as if we can't compute the average
2235 * stresses
for all cells. However, remember that our
class derived from
2236 * <code>
DataOut</code> only iterates over those cells that actually
do
2237 * belong to the present processor, i.e. we don
't have to compute anything
2238 * for all the other cells as this information would not be touched. The
2239 * following little loop does this. We enclose the entire block into a
2240 * pair of braces to make sure that the iterator variables do not remain
2241 * accidentally visible beyond the end of the block in which they are
2245 * Vector<double> norm_of_stress(triangulation.n_active_cells());
2249 * Loop over all the cells...
2252 * for (auto &cell : triangulation.active_cell_iterators())
2253 * if (cell->is_locally_owned())
2257 * On these cells, add up the stresses over all quadrature
2261 * SymmetricTensor<2, dim> accumulated_stress;
2262 * for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2263 * accumulated_stress +=
2264 * reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
2269 * ...then write the norm of the average to their destination:
2272 * norm_of_stress(cell->active_cell_index()) =
2273 * (accumulated_stress / quadrature_formula.size()).norm();
2277 * And on the cells that we are not interested in, set the respective
2278 * value in the vector to a bogus value (norms must be positive, and a
2279 * large negative value should catch your eye) in order to make sure
2280 * that if we were somehow wrong about our assumption that these
2281 * elements would not appear in the output file, that we would find out
2282 * by looking at the graphical output:
2286 * norm_of_stress(cell->active_cell_index()) = -1e+20;
2290 * Finally attach this vector as well to be treated for output:
2293 * data_out.add_data_vector(norm_of_stress, "norm_of_stress");
2297 * As a last piece of data, let us also add the partitioning of the domain
2298 * into subdomains associated with the processors if this is a parallel
2299 * job. This works in the exact same way as in the @ref step_17 "step-17" program:
2302 * std::vector<types::subdomain_id> partition_int(
2303 * triangulation.n_active_cells());
2304 * GridTools::get_subdomain_association(triangulation, partition_int);
2305 * const Vector<double> partitioning(partition_int.begin(),
2306 * partition_int.end());
2307 * data_out.add_data_vector(partitioning, "partitioning");
2311 * Finally, with all this data, we can instruct deal.II to munge the
2312 * information and produce some intermediate data structures that contain
2313 * all these solution and other data vectors:
2316 * data_out.build_patches();
2320 * Let us call a function that opens the necessary output files and writes
2321 * the data we have generated into them. The function automatically
2322 * constructs the file names from the given directory name (the first
2323 * argument) and file name base (second argument). It augments the resulting
2324 * string by pieces that result from the time step number and a "piece
2325 * number" that corresponds to a part of the overall domain that can consist
2326 * of one or more subdomains.
 2330 * The function also writes a record file (with suffix `.pvd`) for Paraview
2331 * that describes how all of these output files combine into the data for
2332 * this single time step:
2335 * const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
2336 * "./", "solution", timestep_no, mpi_communicator, 4);
2340 * The record files must be written only once and not by each processor,
2341 * so we do this on processor 0:
2344 * if (this_mpi_process == 0)
2348 * Finally, we write the paraview record, that references all .pvtu
2349 * files and their respective time. Note that the variable
2350 * times_and_names is declared static, so it will retain the entries
2351 * from the previous timesteps.
2354 * static std::vector<std::pair<double, std::string>> times_and_names;
2355 * times_and_names.emplace_back(present_time, pvtu_filename);
2356 * std::ofstream pvd_output("solution.pvd");
2357 * DataOutBase::write_pvd_record(pvd_output, times_and_names);
2366 * <a name="step_18-TopLeveldo_initial_timestep"></a>
2367 * <h4>TopLevel::do_initial_timestep</h4>
2371 * This and the next function handle the overall structure of the first and
2372 * following timesteps, respectively. The first timestep is slightly more
2373 * involved because we want to compute it multiple times on successively
2374 * refined meshes, each time starting from a clean state. At the end of
2375 * these computations, in which we compute the incremental displacements
2376 * each time, we use the last results obtained for the incremental
2377 * displacements to compute the resulting stress updates and move the mesh
2378 * accordingly. On this new mesh, we then output the solution and any
2379 * additional data we consider important.
2383 * All this is interspersed by generating output to the console to update
2384 * the person watching the screen on what is going on. As in @ref step_17 "step-17", the
2385 * use of <code>pcout</code> instead of <code>std::cout</code> makes sure
2386 * that only one of the parallel processes is actually writing to the
2387 * console, without having to explicitly code an if-statement in each place
2388 * where we generate output:
2391 * template <int dim>
2392 * void TopLevel<dim>::do_initial_timestep()
2394 * present_time += present_timestep;
2396 * pcout << "Timestep " << timestep_no << " at time " << present_time
2399 * for (unsigned int cycle = 0; cycle < 2; ++cycle)
2401 * pcout << " Cycle " << cycle << ':
' << std::endl;
2404 * create_coarse_grid();
2406 * refine_initial_grid();
2408 * pcout << " Number of active cells: "
2409 * << triangulation.n_active_cells() << " (by partition:";
2410 * for (unsigned int p = 0; p < n_mpi_processes; ++p)
2411 * pcout << (p == 0 ? ' ' : '+
')
2412 * << (GridTools::count_cells_with_subdomain_association(
2413 * triangulation, p));
2414 * pcout << ')
' << std::endl;
2418 * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
2419 * << " (by partition:";
2420 * for (unsigned int p = 0; p < n_mpi_processes; ++p)
2421 * pcout << (p == 0 ? ' ' : '+
')
2422 * << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
2424 * pcout << ')
' << std::endl;
2432 * pcout << std::endl;
2440 * <a name="step_18-TopLeveldo_timestep"></a>
2441 * <h4>TopLevel::do_timestep</h4>
2445 * Subsequent timesteps are simpler, and probably do not require any more
2446 * documentation given the explanations for the previous function above:
2449 * template <int dim>
2450 * void TopLevel<dim>::do_timestep()
2452 * present_time += present_timestep;
2454 * pcout << "Timestep " << timestep_no << " at time " << present_time
2456 * if (present_time > end_time)
2458 * present_timestep -= (present_time - end_time);
2459 * present_time = end_time;
2468 * pcout << std::endl;
2475 * <a name="step_18-TopLevelrefine_initial_grid"></a>
2476 * <h4>TopLevel::refine_initial_grid</h4>
2480 * The following function is called when solving the first time step on
2481 * successively refined meshes. After each iteration, it computes a
2482 * refinement criterion, refines the mesh, and sets up the history variables
2483 * in each quadrature point again to a clean state.
2486 * template <int dim>
2487 * void TopLevel<dim>::refine_initial_grid()
2491 * First, let each process compute error indicators for the cells it owns:
2494 * Vector<float> error_per_cell(triangulation.n_active_cells());
2495 * KellyErrorEstimator<dim>::estimate(
2497 * QGauss<dim - 1>(fe.degree + 1),
2498 * std::map<types::boundary_id, const Function<dim> *>(),
2499 * incremental_displacement,
2503 * MultithreadInfo::n_threads(),
2504 * this_mpi_process);
2508 * Then set up a global vector into which we merge the local indicators
2509 * from each of the %parallel processes:
2512 * const unsigned int n_local_cells =
2513 * triangulation.n_locally_owned_active_cells();
2515 * PETScWrappers::MPI::Vector distributed_error_per_cell(
2516 * mpi_communicator, triangulation.n_active_cells(), n_local_cells);
2518 * for (unsigned int i = 0; i < error_per_cell.size(); ++i)
2519 * if (error_per_cell(i) != 0)
2520 * distributed_error_per_cell(i) = error_per_cell(i);
2521 * distributed_error_per_cell.compress(VectorOperation::insert);
2525 * Once we have that, copy it back into local copies on all processors and
2526 * refine the mesh accordingly:
2529 * error_per_cell = distributed_error_per_cell;
2530 * GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2534 * triangulation.execute_coarsening_and_refinement();
2538 * Finally, set up quadrature point data again on the new mesh, and only
2539 * on those cells that we have determined to be ours:
2542 * setup_quadrature_point_history();
2550 * <a name="step_18-TopLevelmove_mesh"></a>
2551 * <h4>TopLevel::move_mesh</h4>
2555 * At the end of each time step, we move the nodes of the mesh according to
2556 * the incremental displacements computed in this time step. To do this, we
2557 * keep a vector of flags that indicate for each vertex whether we have
2558 * already moved it around, and then loop over all cells and move those
2559 * vertices of the cell that have not been moved yet. It is worth noting
2560 * that it does not matter from which of the cells adjacent to a vertex we
2561 * move this vertex: since we compute the displacement using a continuous
2562 * finite element, the displacement field is continuous as well and we can
2563 * compute the displacement of a given vertex from each of the adjacent
2564 * cells. We only have to make sure that we move each node exactly once,
2565 * which is why we keep the vector of flags.
2569 * There are two noteworthy things in this function. First, how we get the
2570 * displacement field at a given vertex using the
2571 * <code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
2572 * of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
2573 * given cell. In the present case, displacement in the k-th coordinate
2574 * direction corresponds to the k-th component of the finite element. Using a
2575 * function like this bears a certain risk, because it uses knowledge of the
2576 * order of elements that we have taken together for this program in the
2577 * <code>FESystem</code> element. If we decided to add an additional
2578 * variable, for example a pressure variable for stabilization, and happened
2579 * to insert it as the first variable of the element, then the computation
2580 * below will start to produce nonsensical results. In addition, this
2581 * computation rests on other assumptions: first, that the element we use
2582 * has, indeed, degrees of freedom that are associated with vertices. This
2583 * is indeed the case for the present Q1 element, as would be for all Qp
2584 * elements of polynomial order <code>p</code>. However, it would not hold
2585 * for discontinuous elements, or elements for mixed formulations. Secondly,
2586 * it also rests on the assumption that the displacement at a vertex is
2587 * determined solely by the value of the degree of freedom associated with
2588 * this vertex; in other words, all shape functions corresponding to other
2589 * degrees of freedom are zero at this particular vertex. Again, this is the
2590 * case for the present element, but is not so for all elements that are
2591 * presently available in deal.II. Despite its risks, we choose to use this
2592 * way in order to present a way to query individual degrees of freedom
2593 * associated with vertices.
2597 * In this context, it is instructive to point out what a more general way
2598 * would be. For general finite elements, the way to go would be to take a
2599 * quadrature formula with the quadrature points in the vertices of a
2600 * cell. The <code>QTrapezoid</code> formula for the trapezoidal rule does
2601 * exactly this. With this quadrature formula, we would then initialize an
2602 * <code>FEValues</code> object in each cell, and use the
2603 * <code>FEValues::get_function_values</code> function to obtain the values
2604 * of the solution function in the quadrature points, i.e. the vertices of
2605 * the cell. These are the only values that we really need, i.e. we are not
2606 * at all interested in the weights (or the <code>JxW</code> values)
2607 * associated with this particular quadrature formula, and this can be
2608 * specified as the last argument in the constructor to
2609 * <code>FEValues</code>. The only point of minor inconvenience in this
2610 * scheme is that we have to figure out which quadrature point corresponds
2611 * to the vertex we consider at present, as they may or may not be ordered
2612 * in the same order.
2616 * This inconvenience could be avoided if finite elements have support
2617 * points on vertices (which the one here has; for the concept of support
2618 * points, see @ref GlossSupport "support points"). For such a case, one
2619 * could construct a custom quadrature rule using
2620 * FiniteElement::get_unit_support_points(). The first
2621 * <code>cell->n_vertices()*fe.dofs_per_vertex</code>
2622 * quadrature points will then correspond to the vertices of the cell and
2623 * are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
2624 * account that support points for vector elements will be duplicated
2625 * <code>fe.dofs_per_vertex</code> times.
2629 * Another point worth explaining about this short function is the way in
2630 * which the triangulation class exports information about its vertices:
2631 * through the <code>Triangulation::n_vertices</code> function, it
2632 * advertises how many vertices there are in the triangulation. Not all of
2633 * them are actually in use all the time -- some are left-overs from cells
2634 * that have been coarsened previously and remain in existence since deal.II
2635 * never changes the number of a vertex once it has come into existence,
2636 * even if vertices with lower number go away. Secondly, the location
2637 * returned by <code>cell-@>vertex(v)</code> is not only a read-only object
2638 * of type <code>Point@<dim@></code>, but in fact a reference that can be
 2639 * written to. This allows us to move around the nodes of a mesh with relative
2640 * ease, but it is worth pointing out that it is the responsibility of an
2641 * application program using this feature to make sure that the resulting
2642 * cells are still useful, i.e. are not distorted so much that the cell is
2643 * degenerated (indicated, for example, by negative Jacobians). Note that we
2644 * do not have any provisions in this function to actually ensure this, we
2649 * After this lengthy introduction, here are the full 20 or so lines of
2653 * template <int dim>
2654 * void TopLevel<dim>::move_mesh()
2656 * pcout << " Moving mesh..." << std::endl;
2658 * std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
2659 * for (auto &cell : dof_handler.active_cell_iterators())
2660 * for (const auto v : cell->vertex_indices())
2661 * if (vertex_touched[cell->vertex_index(v)] == false)
2663 * vertex_touched[cell->vertex_index(v)] = true;
2665 * Point<dim> vertex_displacement;
2666 * for (unsigned int d = 0; d < dim; ++d)
2667 * vertex_displacement[d] =
2668 * incremental_displacement(cell->vertex_dof_index(v, d));
2670 * cell->vertex(v) += vertex_displacement;
2678 * <a name="step_18-TopLevelsetup_quadrature_point_history"></a>
2679 * <h4>TopLevel::setup_quadrature_point_history</h4>
2683 * At the beginning of our computations, we needed to set up initial values
2684 * of the history variables, such as the existing stresses in the material,
2685 * that we store in each quadrature point. As mentioned above, we use the
2686 * <code>user_pointer</code> for this that is available in each cell.
2690 * To put this into larger perspective, we note that if we had previously
2691 * available stresses in our model (which we assume do not exist for the
2692 * purpose of this program), then we would need to interpolate the field of
2693 * preexisting stresses to the quadrature points. Likewise, if we were to
2694 * simulate elasto-plastic materials with hardening/softening, then we would
2695 * have to store additional history variables like the present yield stress
2696 * of the accumulated plastic strains in each quadrature
 2697 * point. Pre-existing hardening or weakening would then be implemented by
2698 * interpolating these variables in the present function as well.
2701 * template <int dim>
2702 * void TopLevel<dim>::setup_quadrature_point_history()
2706 * For good measure, we set all user pointers of all cells, whether
 2707 * ours or not, to the null pointer. This way, if we ever access the user
2708 * pointer of a cell which we should not have accessed, a segmentation
2709 * fault will let us know that this should not have happened:
2715 * triangulation.clear_user_data();
2719 * Next, allocate the quadrature objects that are within the responsibility
2720 * of this processor. This, of course, equals the number of cells that
2721 * belong to this processor times the number of quadrature points our
2722 * quadrature formula has on each cell. Since the `resize()` function does
2723 * not actually shrink the amount of allocated memory if the requested new
2724 * size is smaller than the old size, we resort to a trick to first free all
2725 * memory, and then reallocate it: we declare an empty vector as a temporary
2726 * variable and then swap the contents of the old vector and this temporary
2727 * variable. This makes sure that the `quadrature_point_history` is now
2728 * really empty, and we can let the temporary variable that now holds the
2729 * previous contents of the vector go out of scope and be destroyed. In the
2730 * next step we can then re-allocate as many elements as we need, with the
2731 * vector default-initializing the `PointHistory` objects, which includes
2732 * setting the stress variables to zero.
2736 * std::vector<PointHistory<dim>> tmp;
2737 * quadrature_point_history.swap(tmp);
2739 * quadrature_point_history.resize(
2740 * triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
2744 * Finally loop over all cells again and set the user pointers from the
2745 * cells that belong to the present processor to point to the first
2746 * quadrature point objects corresponding to this cell in the vector of
2750 * unsigned int history_index = 0;
2751 * for (auto &cell : triangulation.active_cell_iterators())
2752 * if (cell->is_locally_owned())
2754 * cell->set_user_pointer(&quadrature_point_history[history_index]);
2755 * history_index += quadrature_formula.size();
2760 * At the end, for good measure make sure that our count of elements was
2761 * correct and that we have both used up all objects we allocated
2762 * previously, and not point to any objects beyond the end of the
2763 * vector. Such defensive programming strategies are always good checks to
2764 * avoid accidental errors and to guard against future changes to this
2765 * function that forget to update all uses of a variable at the same
2766 * time. Recall that constructs using the <code>Assert</code> macro are
2767 * optimized away in optimized mode, so do not affect the run time of
2771 * Assert(history_index == quadrature_point_history.size(),
2772 * ExcInternalError());
2780 * <a name="step_18-TopLevelupdate_quadrature_point_history"></a>
2781 * <h4>TopLevel::update_quadrature_point_history</h4>
2785 * At the end of each time step, we should have computed an incremental
2786 * displacement update so that the material in its new configuration
2787 * accommodates for the difference between the external body and boundary
2788 * forces applied during this time step minus the forces exerted through
2789 * preexisting internal stresses. In order to have the preexisting
2790 * stresses available at the next time step, we therefore have to update the
2791 * preexisting stresses with the stresses due to the incremental
2792 * displacement computed during the present time step. Ideally, the
2793 * resulting sum of internal stresses would exactly counter all external
2794 * forces. Indeed, a simple experiment can make sure that this is so: if we
2795 * choose boundary conditions and body forces to be time independent, then
2796 * the forcing terms (the sum of external forces and internal stresses)
2797 * should be exactly zero. If you make this experiment, you will realize
2798 * from the output of the norm of the right hand side in each time step that
2799 * this is almost the case: it is not exactly zero, since in the first time
2800 * step the incremental displacement and stress updates were computed
2801 * relative to the undeformed mesh, which was then deformed. In the second
2802 * time step, we again compute displacement and stress updates, but this
2803 * time in the deformed mesh -- there, the resulting updates are very small
2804 * but not quite zero. This can be iterated, and in each such iteration the
2805 * residual, i.e. the norm of the right hand side vector, is reduced; if one
2806 * makes this little experiment, one realizes that the norm of this residual
2807 * decays exponentially with the number of iterations, and after an initial
2808 * very rapid decline is reduced by roughly a factor of about 3.5 in each
2809 * iteration (for one testcase I looked at, other testcases, and other
2810 * numbers of unknowns change the factor, but not the exponential decay).
2814 * In a sense, this can then be considered as a quasi-timestepping scheme to
2815 * resolve the nonlinear problem of solving large-deformation elasticity on
2816 * a mesh that is moved along in a Lagrangian manner.
2820 * Another complication is that the existing (old) stresses are defined on
2821 * the old mesh, which we will move around after updating the stresses. If
2822 * this mesh update involves rotations of the cell, then we need to also
2823 * rotate the updated stress, since it was computed relative to the
2824 * coordinate system of the old cell.
2828 * Thus, what we need is the following: on each cell which the present
2829 * processor owns, we need to extract the old stress from the data stored
2830 * with each quadrature point, compute the stress update, add the two
2831 * together, and then rotate the result together with the incremental
2832 * rotation computed from the incremental displacement at the present
2833 * quadrature point. We will detail these steps below:
2836 * template <int dim>
2837 * void TopLevel<dim>::update_quadrature_point_history()
2841 * First, set up an <code>FEValues</code> object by which we will evaluate
2842 * the incremental displacements and the gradients thereof at the
2843 * quadrature points, together with a vector that will hold this
2847 * FEValues<dim> fe_values(fe,
2848 * quadrature_formula,
2849 * update_values | update_gradients);
2851 * std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
2852 * quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));
2856 * Then loop over all cells and do the job in the cells that belong to our
2860 * for (auto &cell : dof_handler.active_cell_iterators())
2861 * if (cell->is_locally_owned())
2865 * Next, get a pointer to the quadrature point history data local to
2866 * the present cell, and, as a defensive measure, make sure that
2867 * this pointer is within the bounds of the global array:
2870 * PointHistory<dim> *local_quadrature_points_history =
2871 * reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
2872 * Assert(local_quadrature_points_history >=
2873 * &quadrature_point_history.front(),
2874 * ExcInternalError());
2875 * Assert(local_quadrature_points_history <=
2876 * &quadrature_point_history.back(),
2877 * ExcInternalError());
2881 * Then initialize the <code>FEValues</code> object on the present
2882 * cell, and extract the gradients of the displacement at the
2883 * quadrature points for later computation of the strains
2886 * fe_values.reinit(cell);
2887 * fe_values.get_function_gradients(incremental_displacement,
2888 * displacement_increment_grads);
2892 * Then loop over the quadrature points of this cell:
2895 * for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2899 * On each quadrature point, compute the strain increment from
2900 * the gradients, and multiply it by the stress-strain tensor to
2901 * get the stress update. Then add this update to the already
2902 * existing strain at this point:
2905 * const SymmetricTensor<2, dim> new_stress =
2906 * (local_quadrature_points_history[q].old_stress +
2907 * (stress_strain_tensor *
2908 * get_strain(displacement_increment_grads[q])));
2912 * Finally, we have to rotate the result. For this, we first
2913 * have to compute a rotation matrix at the present quadrature
2914 * point from the incremental displacements. In fact, it can be
2915 * computed from the gradients, and we already have a function
2919 * const Tensor<2, dim> rotation =
2920 * get_rotation_matrix(displacement_increment_grads[q]);
2923 * Note that the result, a rotation matrix, is in general an
 2924 * non-symmetric tensor of rank 2, so we must store it as a full
2929 * With this rotation matrix, we can compute the rotated tensor
2930 * by contraction from the left and right, after we expand the
2931 * symmetric tensor <code>new_stress</code> into a full tensor:
2934 * const SymmetricTensor<2, dim> rotated_new_stress =
2935 * symmetrize(transpose(rotation) *
2936 * static_cast<Tensor<2, dim>>(new_stress) * rotation);
2939 * Note that while the result of the multiplication of these
2940 * three matrices should be symmetric, it is not due to floating
2941 * point round off: we get an asymmetry on the order of 1e-16 of
2942 * the off-diagonal elements of the result. When assigning the
2943 * result to a <code>SymmetricTensor</code>, the constructor of
2944 * that class checks the symmetry and realizes that it isn't
2945 * exactly
symmetric; it will then raise an exception. To avoid
2946 * that, we explicitly
symmetrize the result to make it exactly
2951 * The result of all these operations is then written back into
2952 * the original place:
2955 * local_quadrature_points_history[q].old_stress =
2956 * rotated_new_stress;
2963 * This ends the
project specific namespace <code>Step18</code>. The rest is
2964 * as usual and as already shown in @ref step_17
"step-17": A <code>main()</code> function
2965 * that initializes and terminates
PETSc, calls the classes that
do the
2966 * actual work, and makes sure that we
catch all exceptions that propagate
2973 *
int main(
int argc,
char **argv)
2977 *
using namespace dealii;
2978 *
using namespace Step18;
2982 * TopLevel<3> elastic_problem;
2983 * elastic_problem.run();
2985 *
catch (std::exception &exc)
2987 * std::cerr << std::endl
2989 * <<
"----------------------------------------------------"
2991 * std::cerr <<
"Exception on processing: " << std::endl
2992 * << exc.what() << std::endl
2993 * <<
"Aborting!" << std::endl
2994 * <<
"----------------------------------------------------"
3001 * std::cerr << std::endl
3003 * <<
"----------------------------------------------------"
3005 * std::cerr <<
"Unknown exception!" << std::endl
3006 * <<
"Aborting!" << std::endl
3007 * <<
"----------------------------------------------------"
3015<a name=
"step_18-Results"></a><h1>Results</h1>
3019Running the program takes a good
while if one uses
debug mode; it takes about
3020eleven minutes on my i7 desktop. Fortunately, the version compiled with
3021optimizations is much faster; the program only takes about a minute and a half
3022after recompiling with the command <tt>make
release</tt> on the same machine, a
3023much more reasonable time.
3026If
run, the program prints the following output, explaining what it is
3027doing during all that time:
3030[ 66%] Built target step-18
3031[100%] Run step-18 with Release configuration
3034 Number of active cells: 3712 (by
partition: 3712)
3035 Number of degrees of freedom: 17226 (by
partition: 17226)
3036 Assembling system...
norm of rhs is 1.88062e+10
3037 Solver converged in 103 iterations.
3038 Updating quadrature
point data...
3040 Number of active cells: 12812 (by
partition: 12812)
3041 Number of degrees of freedom: 51738 (by
partition: 51738)
3042 Assembling system...
norm of rhs is 1.86145e+10
3043 Solver converged in 121 iterations.
3044 Updating quadrature
point data...
3048 Assembling system...
norm of rhs is 1.84169e+10
3049 Solver converged in 122 iterations.
3050 Updating quadrature
point data...
3054 Assembling system...
norm of rhs is 1.82355e+10
3055 Solver converged in 122 iterations.
3056 Updating quadrature
point data...
3060 Assembling system...
norm of rhs is 1.80728e+10
3061 Solver converged in 117 iterations.
3062 Updating quadrature
point data...
3066 Assembling system...
norm of rhs is 1.79318e+10
3067 Solver converged in 116 iterations.
3068 Updating quadrature
point data...
3072 Assembling system...
norm of rhs is 1.78171e+10
3073 Solver converged in 115 iterations.
3074 Updating quadrature
point data...
3078 Assembling system...
norm of rhs is 1.7737e+10
3079 Solver converged in 112 iterations.
3080 Updating quadrature
point data...
3084 Assembling system...
norm of rhs is 1.77127e+10
3085 Solver converged in 111 iterations.
3086 Updating quadrature
point data...
3090 Assembling system...
norm of rhs is 1.78207e+10
3091 Solver converged in 113 iterations.
3092 Updating quadrature
point data...
3095Timestep 10 at time 10
3096 Assembling system...
norm of rhs is 1.83544e+10
3097 Solver converged in 115 iterations.
3098 Updating quadrature
point data...
3101[100%] Built target
run
3102make
run 176.82s user 0.15s system 198% cpu 1:28.94 total
3104In other words, it is computing on 12,000 cells and with some 52,000
3105unknowns. Not a whole lot, but enough for a coupled three-dimensional
3106problem to keep a computer busy for a while. At the
end of the day,
3107this is what we have for output:
3110-rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.
vtu
3111-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0010.pvtu
3112-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0010.visit
3113-rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.
vtu
3114-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0009.pvtu
3115-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0009.visit
3116-rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.
vtu
3117-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0008.pvtu
3118-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0008.visit
3119-rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.
vtu
3120-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0007.pvtu
3121-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0007.visit
3122-rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.
vtu
3123-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0006.pvtu
3124-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0006.visit
3125-rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.
vtu
3126-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0005.pvtu
3127-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0005.visit
3128-rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.
vtu
3129-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0004.pvtu
3130-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0004.visit
3131-rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.
vtu
3132-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0003.pvtu
3133-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0003.visit
3134-rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.
vtu
3135-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0002.pvtu
3136-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0002.visit
3137-rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.
vtu
3138-rw-r--r-- 1 drwells users 761 Feb 13 19:34 solution-0001.pvtu
3139-rw-r--r-- 1 drwells users 33 Feb 13 19:34 solution-0001.visit
3143If we visualize these files with VisIt or Paraview, we get to see the full picture
3144of the disaster our forced compression wreaks on the
cylinder (colors in the
3145images encode the
norm of the stress in the material):
3148<div class=
"threecolumn" style=
"width: 80%">
3149 <div class=
"parent">
3150 <div class=
"img" align=
"center">
3151 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png"
3155 <div class=
"text" align=
"center">
3159 <div class=
"parent">
3160 <div class=
"img" align=
"center">
3161 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png"
3165 <div class=
"text" align=
"center">
3169 <div class=
"parent">
3170 <div class=
"img" align=
"center">
3171 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png"
3175 <div class=
"text" align=
"center">
3182<div class=
"threecolumn" style=
"width: 80%">
3183 <div class=
"parent">
3184 <div class=
"img" align=
"center">
3185 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png"
3189 <div class=
"text" align=
"center">
3193 <div class=
"parent">
3194 <div class=
"img" align=
"center">
3195 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png"
3199 <div class=
"text" align=
"center">
3203 <div class=
"parent">
3204 <div class=
"img" align=
"center">
3205 <img src=
"https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png"
3209 <div class=
"text" align=
"center">
3216As is clearly visible, as we keep compressing the cylinder, it starts
3217to bow out near the fully constrained bottom surface and, after about eight
3218time units, buckle in an azimuthally
symmetric manner.
3221Although the result appears plausible for the symmetric geometry and loading,
3222it is yet to be established whether or not the computation is fully converged.
3223In order to see whether it is, we ran the program again with one more global
3224refinement at the beginning and with the time step halved. This would have
3225taken a very long time on a single machine, so we used a proper workstation and
3226ran it on 16 processors in parallel. The beginning of the output now looks like
3229Timestep 1 at time 0.5
3231 Number of active cells: 29696 (by
partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
3232 Number of degrees of freedom: 113100 (by
partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
3233 Assembling system...
norm of rhs is 1.10765e+10
3234 Solver converged in 209 iterations.
3235 Updating quadrature
point data...
3237 Number of active cells: 102034 (by
partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
3238 Number of degrees of freedom: 359337 (by
partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
3239 Assembling system...
norm of rhs is 1.35759e+10
3240 Solver converged in 268 iterations.
3241 Updating quadrature
point data...
3245 Assembling system...
norm of rhs is 1.34674e+10
3246 Solver converged in 267 iterations.
3247 Updating quadrature
point data...
3250Timestep 3 at time 1.5
3251 Assembling system...
norm of rhs is 1.33607e+10
3252 Solver converged in 265 iterations.
3253 Updating quadrature
point data...
3257 Assembling system...
norm of rhs is 1.32558e+10
3258 Solver converged in 263 iterations.
3259 Updating quadrature
point data...
3264Timestep 20 at time 10
3265 Assembling system...
norm of rhs is 1.47755e+10
3266 Solver converged in 425 iterations.
3267 Updating quadrature
point data...
3270That's quite a good number of unknowns, given that we are in 3d. The output of
3271this program consists of 16 files for each time step:
3273\$ ls -l solution-0001*
3274-rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
3275-rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
3276-rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
3277-rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
3278-rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
3279-rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
3280-rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
3281-rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
3282-rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
3283-rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
3284-rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
3285-rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
3286-rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
3287-rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
3288-rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
3289-rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
3290-rw-r--r-- 1 wellsd2 user 1421 Feb 13 21:09 solution-0001.pvtu
3291-rw-r--r-- 1 wellsd2 user 364 Feb 13 21:09 solution-0001.visit
3295Here are first the mesh on which we compute as well as the partitioning
3296for the 16 processors:
3299<div class="twocolumn" style="width: 80%">
3300 <div class="parent">
3301 <div class="img" align="center">
3302 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
3303 alt="Discretization"
3306 <div class="text" align="center">
3310 <div class="parent">
3311 <div class="img" align="center">
3312 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
3313 alt="Parallel partitioning"
3316 <div class="text" align="center">
3317 Parallel partitioning
3323Finally, here is the same output as we have shown before for the much smaller
3326<div class="threecolumn" style="width: 80%">
3327 <div class="parent">
3328 <div class="img" align="center">
3329 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png"
3333 <div class="text" align="center">
3337 <div class="parent">
3338 <div class="img" align="center">
3339 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png"
3343 <div class="text" align="center">
3347 <div class="parent">
3348 <div class="img" align="center">
3349 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png"
3353 <div class="text" align="center">
3360<div class="threecolumn" style="width: 80%">
3361 <div class="parent">
3362 <div class="img" align="center">
3363 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png"
3367 <div class="text" align="center">
3371 <div class="parent">
3372 <div class="img" align="center">
3373 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png"
3377 <div class="text" align="center">
3381 <div class="parent">
3382 <div class="img" align="center">
3383 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png"
3387 <div class="text" align="center">
3394As before, we observe that at high axial compression the cylinder begins
3395to buckle, but this time ultimately collapses on itself. In contrast to our
3396first run, towards the end of the simulation the deflection pattern becomes
3397nonsymmetric (the central bulge deflects laterally). The model clearly does not
3398provide for this (all our forces and boundary deflections are symmetric) but the
3399effect is probably physically correct anyway: in reality, small inhomogeneities
3400in the body's material properties would lead it to buckle to one side
3401to evade the forcing; in numerical simulations, small perturbations
3402such as numerical round-off or an inexact solution of a linear system
3403by an iterative solver could have the same effect. Another typical source of
3404asymmetries in adaptive computations is that only a certain fraction of cells
3405is refined in each step, which may lead to asymmetric meshes even
if the
3409If one compares this with the previous run, the results are both qualitatively
3410and quantitatively different. The previous computation was
3411therefore certainly not converged, though we can't say anything for sure about
3412the present one. One would need an even finer computation to find out. However,
3413the point may be moot: looking at the last picture in detail, it is pretty
3414obvious that not only is the linear small deformation model we chose completely
3415inadequate, but for a realistic simulation we would also need to make sure that
3416the body does not intersect itself during deformation (if we continued
3417compressing the cylinder we would observe some self-intersection).
3418Without such a formulation we cannot expect anything to make physical sense,
3419even if it produces nice pictures!
3422<a name="step_18-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
3425The program as is does not really solve an equation that has many applications
3426in practice: quasi-static material deformation based on a purely elastic law
3427is almost boring. However, the program may serve as the starting point for
3428more interesting experiments, and that indeed was the initial motivation for
3429writing it. Here are some suggestions of what the program is missing and in
3430what direction it may be extended:
3432<a name="step_18-Plasticitymodels"></a><h5>Plasticity models</h5>
3435 The most obvious extension is to use a more
3436realistic material model for large-scale quasistatic deformation. The natural
3437choice for this would be plasticity, in which a nonlinear relationship between
3438stress and strain replaces equation @ref step_18-StressStrain "[stress-strain]".
3439Plasticity models are usually rather complicated to program since the stress-strain
3440dependence is generally non-smooth. The material can be thought of being able
3441to withstand only a maximal stress (the yield stress) after which it starts to
3442“flow”. A mathematical description of this can be given in the form of a
3443variational inequality, which alternatively can be treated as minimizing the
3447 (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
3448 - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
3450subject to the constraint
3452 f(\sigma(\mathbf{u})) \le 0
3454on the stress. This extension makes the problem to be solved in each time step
3455nonlinear, so we need another loop within each time step.
3457Without going into further details of this model, we refer to the excellent
3458book by Simo and Hughes on “Computational Inelasticity” for a
3459comprehensive overview of computational strategies for solving plastic
3460models. Alternatively, a brief but concise description of an algorithm for
3461plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann;
3465<a name="step_18-Stabilizationissues"></a><h5>Stabilization issues</h5>
3468The formulation we have chosen, i.e. using
3469piecewise (bi-, tri-)linear elements for all components of the displacement
3470vector, and treating the stress as a variable dependent on the displacement is
3471appropriate for most materials. However, this so-called displacement-based
3472formulation becomes unstable and exhibits spurious modes for incompressible or
3473nearly-incompressible materials. While fluids are usually not elastic (in most
3474cases, the stress depends on velocity gradients, not displacement gradients,
3475although there are exceptions such as electro-rheologic fluids), there are a
3476few solids that are nearly incompressible, for example rubber. Another case is
3477that many plasticity models ultimately let the material become incompressible,
3478although this is outside the scope of the present program.
3480Incompressibility is characterized by Poisson's ratio
3484where @f$\lambda,\mu@f$ are the Lamé constants of the material.
3485Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
3486also follows from mathematical stability considerations). If @f$\nu@f$
3487approaches @f$\frac 12@f$, then the material becomes incompressible. In that
3488case, pure displacement-based formulations are no longer appropriate
for the
3489solution of such problems, and stabilization techniques have to be employed
3490for a stable and accurate solution. The book and paper cited above give
3491indications as to how to
do this, but there is also a large
volume of
3492literature on
this subject; a good start to get an overview of the topic can
3493be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
3496<a name=
"step_18-Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>
3499In the present form, the program
3500only refines the
initial mesh a number of times, but then never again. For any
3501kind of realistic simulation,
one would want to extend
this so that the mesh
3502is refined and coarsened every few time steps instead. This is not hard to
do,
3503in fact, but has been left
for future tutorial programs or as an exercise,
if
3506The main complication
one has to overcome is that
one has to
3507transfer the data that is stored in the quadrature points of the cells of the
3508old mesh to the
new mesh, preferably by some sort of projection scheme. The
3509general approach to
this would go like
this:
3511- At the beginning, the data is only available in the quadrature points of
3512 individual cells, not as a finite element field that is defined everywhere.
3514- So let us find a finite element field that <i>is</i> defined everywhere so
3515 that we can later
interpolate it to the quadrature points of the
new
3516 mesh. In
general, it will be difficult to find a continuous finite element
3517 field that matches the values in the quadrature points exactly because the
3518 number of degrees of freedom of these fields does not match the number of
3519 quadrature points there are, and the nodal values of
this global field will
3520 either be over- or underdetermined. But it is usually not very difficult to
3521 find a discontinuous field that matches the values in the quadrature points;
3522 for example,
if you have a
QGauss(2) quadrature formula (i.e. 4 points per
3523 cell in 2d, 8 points in 3d), then one would use a finite element of kind
3524 FE_DGQ(1), i.e. bi-/tri-linear functions as these have 4 degrees of freedom
3525 per cell in 2d and 8 in 3d.
3527- There are functions that can make this conversion from individual points to
3528 a global field simpler. The following piece of pseudo-code should help if
3529 you use a
QGauss(2) quadrature formula. Note that the multiplication by the
3530 projection matrix below takes a vector of scalar components, i.e., we can only
3531 convert one set of scalars at a time from the quadrature points to the degrees
3532 of freedom and vice versa. So we need to store each component of stress separately,
3533 which requires <code>dim*dim</code> vectors. We'll store this set of vectors in a 2D array to
3534 make it easier to read off components in the same way you would the stress tensor.
3535 Thus, we'll loop over the components of stress on each cell and store
3536 these values in the global history field. (The prefix <code>history_</code>
3537 indicates that we work with quantities related to the history variables defined
3538 in the quadrature points.)
3540 FE_DGQ<dim> history_fe (1);
3541 DoFHandler<dim> history_dof_handler (triangulation);
3542 history_dof_handler.distribute_dofs (history_fe);
3545 history_field (dim,
std::vector<
Vector<
double> >(dim)),
3546 local_history_values_at_qpoints (dim,
std::vector<
Vector<
double> >(dim)),
3547 local_history_fe_values (dim,
std::vector<
Vector<
double> >(dim));
3549 for (
unsigned int i=0; i<dim; ++i)
3550 for (
unsigned int j=0; j<dim; ++j)
3552 history_field[i][j].
reinit(history_dof_handler.n_dofs());
3553 local_history_values_at_qpoints[i][j].reinit(quadrature.size());
3554 local_history_fe_values[i][j].reinit(history_fe.n_dofs_per_cell());
3561 quadrature, quadrature,
3562 qpoint_to_dof_matrix);
3565 endc = dof_handler.end(),
3566 dg_cell = history_dof_handler.begin_active();
3568 for (; cell!=endc; ++cell, ++dg_cell)
3571 PointHistory<dim> *local_quadrature_points_history
3572 =
reinterpret_cast<PointHistory<dim> *
>(cell->user_pointer());
3574 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3575 ExcInternalError());
3576 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3577 ExcInternalError());
3579 for (
unsigned int i=0; i<dim; ++i)
3580 for (
unsigned int j=0; j<dim; ++j)
3582 for (
unsigned int q=0; q<quadrature.size(); ++q)
3583 local_history_values_at_qpoints[i][j](q)
3584 = local_quadrature_points_history[q].old_stress[i][j];
3586 qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
3587 local_history_values_at_qpoints[i][j]);
3589 dg_cell->set_dof_values (local_history_fe_values[i][j],
3590 history_field[i][j]);
3595- Now that we have a global field, we can
refine the mesh and transfer the
3597 interpolate everything from the old to the
new mesh.
3599- In a
final step, we have to get the data back from the now interpolated
3600 global field to the quadrature points on the
new mesh. The following code
3604 history_fe.n_dofs_per_cell());
3608 dof_to_qpoint_matrix);
3611 endc = dof_handler.end(),
3612 dg_cell = history_dof_handler.begin_active();
3614 for (; cell != endc; ++cell, ++dg_cell)
3616 PointHistory<dim> *local_quadrature_points_history
3617 =
reinterpret_cast<PointHistory<dim> *
>(cell->user_pointer());
3619 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3620 ExcInternalError());
3621 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3622 ExcInternalError());
3624 for (
unsigned int i=0; i<dim; ++i)
3625 for (
unsigned int j=0; j<dim; ++j)
3627 dg_cell->get_dof_values (history_field[i][j],
3628 local_history_fe_values[i][j]);
3630 dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
3631 local_history_fe_values[i][j]);
3633 for (
unsigned int q=0; q<quadrature.size(); ++q)
3634 local_quadrature_points_history[q].old_stress[i][j]
3635 = local_history_values_at_qpoints[i][j](q);
3639It becomes a bit more complicated once we run the program in parallel, since
3640then each process only stores
this data
for the cells it owned on the old
3641mesh. That said,
using a
parallel vector for <code>history_field</code> will
3642do the trick
if you put a call to <code>
compress</code> after the transfer
3643from quadrature points into the global vector.
3646<a name=
"step_18-Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>
3649At present, the program makes no attempt
3650to make sure that a cell, after moving its vertices at the
end of the time
3652positive and bounded away from zero everywhere). It is, in fact, not very hard
3653to set boundary values and forcing terms in such a way that
one gets distorted
3654and inverted cells rather quickly. Certainly, in some cases of large
3655deformation,
this is unavoidable with a mesh of finite mesh size, but in some
3656other cases
this should be preventable by appropriate mesh refinement and/or a
3657reduction of the time step size. The program does not
do that, but a more
3658sophisticated version definitely should employ some sort of heuristic defining
3659what amount of deformation of cells is acceptable, and what isn't.
3662<a name="step_18-PlainProg"></a>
3663<h1> The plain program</h1>
3664@include "step-18.cc"
void attach_dof_handler(const DoFHandler< dim, spacedim > &)
active_cell_iterator begin_active(const unsigned int level=0) const
virtual void vector_value_list(const std::vector< Point< dim > > &points, std::vector< Vector< RangeNumberType > > &values) const
virtual void vector_value(const Point< dim > &p, Vector< RangeNumberType > &values) const
virtual void reinit(const size_type N, const bool omit_zeroing_entries=false)
const unsigned int DoFAccessor< structdim, dim, spacedim, level_dof_access >::dimension
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
typename ActiveSelector::active_cell_iterator active_cell_iterator
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
const bool IsBlockVector< VectorType >::value
@ update_values
Shape function values.
@ update_JxW_values
Transformed quadrature weights.
@ update_gradients
Shape function gradients.
@ update_quadrature_points
Transformed quadrature points.
void approximate(const SynchronousIterators< std::tuple< typename DoFHandler< dim, spacedim >::active_cell_iterator, Vector< float >::iterator > > &cell, const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, const InputVector &solution, const unsigned int component)
void cylinder(Triangulation< dim > &tria, const double radius=1., const double half_length=1.)
void cylinder_shell(Triangulation< dim > &tria, const double length, const double inner_radius, const double outer_radius, const unsigned int n_radial_cells=0, const unsigned int n_axial_cells=0, const bool colorize=false)
void refine(Triangulation< dim, spacedim > &tria, const Vector< Number > &criteria, const double threshold, const unsigned int max_to_mark=numbers::invalid_unsigned_int)
@ valid
Iterator points to a valid object.
@ matrix
Contents is actually a matrix.
@ symmetric
Matrix is symmetric.
@ general
No special properties.
constexpr types::blas_int one
void cell_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const FEValuesBase< dim > &fetest, const ArrayView< const std::vector< double > > &velocity, const double factor=1.)
double norm(const FEValuesBase< dim > &fe, const ArrayView< const std::vector< Tensor< 1, dim > > > &Du)
Point< spacedim > point(const gp_Pnt &p, const double tolerance=1e-10)
SymmetricTensor< 2, dim, Number > C(const Tensor< 2, dim, Number > &F)
Tensor< 2, dim, Number > l(const Tensor< 2, dim, Number > &F, const Tensor< 2, dim, Number > &dF_dt)
SymmetricTensor< 2, dim, Number > d(const Tensor< 2, dim, Number > &F, const Tensor< 2, dim, Number > &dF_dt)
constexpr ReturnType< rank, T >::value_type & extract(T &t, const ArrayType &indices)
VectorType::value_type * end(VectorType &V)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
std::string compress(const std::string &input)
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
void copy(const T *begin, const T *end, U *dest)
int(& functions)(const void *v1, const void *v2)
void assemble(const MeshWorker::DoFInfoBox< dim, DOFINFO > &dinfo, A *assembler)
::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
DEAL_II_HOST constexpr SymmetricTensor< 2, dim, Number > symmetrize(const Tensor< 2, dim, Number > &t)