204 * #include <deal.II/base/function.h>
205 * #include <deal.II/base/logstream.h>
206 * #include <deal.II/base/quadrature_lib.h>
207 * #include <deal.II/base/quadrature_point_data.h>
208 * #include <deal.II/base/tensor.h>
209 * #include <deal.II/base/timer.h>
210 * #include <deal.II/base/utilities.h>
212 * #include <deal.II/lac/affine_constraints.h>
213 * #include <deal.II/lac/block_sparse_matrix.h>
214 * #include <deal.II/lac/block_vector.h>
215 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
216 * #include <deal.II/lac/full_matrix.h>
217 * #include <deal.II/lac/precondition.h>
218 * #include <deal.II/lac/solver_cg.h>
219 * #include <deal.II/lac/solver_gmres.h>
220 * #include <deal.II/lac/sparse_direct.h>
221 * #include <deal.II/lac/sparsity_tools.h>
223 * #include <deal.II/lac/petsc_block_sparse_matrix.h>
224 * #include <deal.II/lac/petsc_sparse_matrix.h>
225 * #include <deal.II/lac/petsc_vector.h>
226 * #include <deal.II/lac/petsc_precondition.h>
227 * #include <deal.II/lac/petsc_solver.h>
229 * #include <deal.II/grid/grid_generator.h>
230 * #include <deal.II/grid/grid_refinement.h>
231 * #include <deal.II/grid/grid_tools.h>
232 * #include <deal.II/grid/manifold_lib.h>
233 * #include <deal.II/grid/tria.h>
234 * #include <deal.II/grid/tria_accessor.h>
235 * #include <deal.II/grid/tria_iterator.h>
237 * #include <deal.II/dofs/dof_accessor.h>
238 * #include <deal.II/dofs/dof_handler.h>
239 * #include <deal.II/dofs/dof_renumbering.h>
240 * #include <deal.II/dofs/dof_tools.h>
242 * #include <deal.II/fe/fe_q.h>
243 * #include <deal.II/fe/fe_system.h>
244 * #include <deal.II/fe/fe_values.h>
246 * #include <deal.II/numerics/data_out.h>
247 * #include <deal.II/numerics/error_estimator.h>
248 * #include <deal.II/numerics/matrix_tools.h>
249 * #include <deal.II/numerics/vector_tools.h>
251 * #include <deal.II/distributed/grid_refinement.h>
252 * #include <deal.II/distributed/solution_transfer.h>
253 * #include <deal.II/distributed/tria.h>
256 * #include <iostream>
266 * <a name=
"time_dependent_navier_stokes.cc-Createthetriangulation"></a>
267 * <h3>Create the triangulation</h3>
268 * The code to create the triangulation is copied from
269 * [Martin Kronbichler
's
270 * code](https://github.com/kronbichler/adaflo/blob/master/tests/flow_past_cylinder.cc)
271 * with very few modifications.
276 * <a name="time_dependent_navier_stokes.cc-Helperfunction"></a>
277 * <h4>Helper function</h4>
280 * void create_triangulation_2d(Triangulation<2> &tria,
281 * bool compute_in_2d = true)
283 * SphericalManifold<2> boundary(Point<2>(0.5, 0.2));
284 * Triangulation<2> left, middle, right, tmp, tmp2;
285 * GridGenerator::subdivided_hyper_rectangle(
287 * std::vector<unsigned int>({3U, 4U}),
289 * Point<2>(0.3, 0.41),
291 * GridGenerator::subdivided_hyper_rectangle(
293 * std::vector<unsigned int>({18U, 4U}),
295 * Point<2>(2.5, 0.41),
300 * Create middle part first as a hyper shell.
303 * GridGenerator::hyper_shell(middle, Point<2>(0.5, 0.2), 0.05, 0.2, 4, true);
304 * middle.reset_all_manifolds();
305 * for (Triangulation<2>::cell_iterator cell = middle.begin();
306 * cell != middle.end();
308 * for (unsigned int f = 0; f < GeometryInfo<2>::faces_per_cell; ++f)
310 * bool is_inner_rim = true;
311 * for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_face; ++v)
313 * Point<2> &vertex = cell->face(f)->vertex(v);
314 * if (std::abs(vertex.distance(Point<2>(0.5, 0.2)) - 0.05) > 1e-10)
316 * is_inner_rim = false;
321 * cell->face(f)->set_manifold_id(1);
323 * middle.set_manifold(1, boundary);
324 * middle.refine_global(1);
328 * Then move the vertices to the points where we want them to be to create a
329 * slightly asymmetric cube with a hole:
332 * for (Triangulation<2>::cell_iterator cell = middle.begin();
333 * cell != middle.end();
335 * for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_cell; ++v)
337 * Point<2> &vertex = cell->vertex(v);
338 * if (std::abs(vertex[0] - 0.7) < 1e-10 &&
339 * std::abs(vertex[1] - 0.2) < 1e-10)
340 * vertex = Point<2>(0.7, 0.205);
341 * else if (std::abs(vertex[0] - 0.6) < 1e-10 &&
342 * std::abs(vertex[1] - 0.3) < 1e-10)
343 * vertex = Point<2>(0.7, 0.41);
344 * else if (std::abs(vertex[0] - 0.6) < 1e-10 &&
345 * std::abs(vertex[1] - 0.1) < 1e-10)
346 * vertex = Point<2>(0.7, 0);
347 * else if (std::abs(vertex[0] - 0.5) < 1e-10 &&
348 * std::abs(vertex[1] - 0.4) < 1e-10)
349 * vertex = Point<2>(0.5, 0.41);
350 * else if (std::abs(vertex[0] - 0.5) < 1e-10 &&
351 * std::abs(vertex[1] - 0.0) < 1e-10)
352 * vertex = Point<2>(0.5, 0.0);
353 * else if (std::abs(vertex[0] - 0.4) < 1e-10 &&
354 * std::abs(vertex[1] - 0.3) < 1e-10)
355 * vertex = Point<2>(0.3, 0.41);
356 * else if (std::abs(vertex[0] - 0.4) < 1e-10 &&
357 * std::abs(vertex[1] - 0.1) < 1e-10)
358 * vertex = Point<2>(0.3, 0);
359 * else if (std::abs(vertex[0] - 0.3) < 1e-10 &&
360 * std::abs(vertex[1] - 0.2) < 1e-10)
361 * vertex = Point<2>(0.3, 0.205);
362 * else if (std::abs(vertex[0] - 0.56379) < 1e-4 &&
363 * std::abs(vertex[1] - 0.13621) < 1e-4)
364 * vertex = Point<2>(0.59, 0.11);
365 * else if (std::abs(vertex[0] - 0.56379) < 1e-4 &&
366 * std::abs(vertex[1] - 0.26379) < 1e-4)
367 * vertex = Point<2>(0.59, 0.29);
368 * else if (std::abs(vertex[0] - 0.43621) < 1e-4 &&
369 * std::abs(vertex[1] - 0.13621) < 1e-4)
370 * vertex = Point<2>(0.41, 0.11);
371 * else if (std::abs(vertex[0] - 0.43621) < 1e-4 &&
372 * std::abs(vertex[1] - 0.26379) < 1e-4)
373 * vertex = Point<2>(0.41, 0.29);
378 * Refine once to create the same level of refinement as in the
379 * neighboring domains:
382 * middle.refine_global(1);
386 * Must copy the triangulation because we cannot merge triangulations with
390 * GridGenerator::flatten_triangulation(middle, tmp2);
394 * Left domain is required in 3d only.
399 * GridGenerator::merge_triangulations(tmp2, right, tria);
403 * GridGenerator::merge_triangulations(left, tmp2, tmp);
404 * GridGenerator::merge_triangulations(tmp, right, tria);
411 * <a name="time_dependent_navier_stokes.cc-2Dflowaroundcylindertriangulation"></a>
412 * <h4>2D flow around cylinder triangulation</h4>
415 * void create_triangulation(Triangulation<2> &tria)
417 * create_triangulation_2d(tria);
420 * Set the left boundary (inflow) to 0, the right boundary (outflow) to 1,
421 * upper to 3, lower to 2 and the cylindrical surface to 4.
424 * for (Triangulation<2>::active_cell_iterator cell = tria.begin();
425 * cell != tria.end();
428 * for (unsigned int f = 0; f < GeometryInfo<2>::faces_per_cell; ++f)
430 * if (cell->face(f)->at_boundary())
432 * if (std::abs(cell->face(f)->center()[0] - 2.5) < 1e-12)
434 * cell->face(f)->set_all_boundary_ids(1);
436 * else if (std::abs(cell->face(f)->center()[0] - 0.3) < 1e-12)
438 * cell->face(f)->set_all_boundary_ids(0);
440 * else if (std::abs(cell->face(f)->center()[1] - 0.41) < 1e-12)
442 * cell->face(f)->set_all_boundary_ids(3);
444 * else if (std::abs(cell->face(f)->center()[1]) < 1e-12)
446 * cell->face(f)->set_all_boundary_ids(2);
450 * cell->face(f)->set_all_boundary_ids(4);
460 * <a name="time_dependent_navier_stokes.cc-3Dflowaroundcylindertriangulation"></a>
461 * <h4>3D flow around cylinder triangulation</h4>
464 * void create_triangulation(Triangulation<3> &tria)
466 * Triangulation<2> tria_2d;
467 * create_triangulation_2d(tria_2d, false);
468 * GridGenerator::extrude_triangulation(tria_2d, 5, 0.41, tria);
471 * Set the ids of the boundaries in x direction to 0 and 1; y direction to 2 and 3;
472 * z direction to 4 and 5; the cylindrical surface 6.
475 * for (Triangulation<3>::active_cell_iterator cell = tria.begin();
476 * cell != tria.end();
479 * for (unsigned int f = 0; f < GeometryInfo<3>::faces_per_cell; ++f)
481 * if (cell->face(f)->at_boundary())
483 * if (std::abs(cell->face(f)->center()[0] - 2.5) < 1e-12)
485 * cell->face(f)->set_all_boundary_ids(1);
487 * else if (std::abs(cell->face(f)->center()[0]) < 1e-12)
489 * cell->face(f)->set_all_boundary_ids(0);
491 * else if (std::abs(cell->face(f)->center()[1] - 0.41) < 1e-12)
493 * cell->face(f)->set_all_boundary_ids(3);
495 * else if (std::abs(cell->face(f)->center()[1]) < 1e-12)
497 * cell->face(f)->set_all_boundary_ids(2);
499 * else if (std::abs(cell->face(f)->center()[2] - 0.41) < 1e-12)
501 * cell->face(f)->set_all_boundary_ids(5);
503 * else if (std::abs(cell->face(f)->center()[2]) < 1e-12)
505 * cell->face(f)->set_all_boundary_ids(4);
509 * cell->face(f)->set_all_boundary_ids(6);
519 * <a name="time_dependent_navier_stokes.cc-Timestepping"></a>
520 * <h3>Time stepping</h3>
521 * This class is pretty much self-explanatory.
527 * Time(const double time_end,
528 * const double delta_t,
529 * const double output_interval,
530 * const double refinement_interval)
533 * time_end(time_end),
535 * output_interval(output_interval),
536 * refinement_interval(refinement_interval)
539 * double current() const { return time_current; }
540 * double end() const { return time_end; }
541 * double get_delta_t() const { return delta_t; }
542 * unsigned int get_timestep() const { return timestep; }
543 * bool time_to_output() const;
544 * bool time_to_refine() const;
548 * unsigned int timestep;
549 * double time_current;
550 * const double time_end;
551 * const double delta_t;
552 * const double output_interval;
553 * const double refinement_interval;
556 * bool Time::time_to_output() const
558 * unsigned int delta = static_cast<unsigned int>(output_interval / delta_t);
559 * return (timestep >= delta && timestep % delta == 0);
562 * bool Time::time_to_refine() const
564 * unsigned int delta = static_cast<unsigned int>(refinement_interval / delta_t);
565 * return (timestep >= delta && timestep % delta == 0);
568 * void Time::increment()
570 * time_current += delta_t;
577 * <a name="time_dependent_navier_stokes.cc-Boundaryvalues"></a>
578 * <h3>Boundary values</h3>
579 * Dirichlet boundary conditions for the velocity inlet and walls.
583 * class BoundaryValues : public Function<dim>
586 * BoundaryValues() : Function<dim>(dim + 1) {}
587 * virtual double value(const Point<dim> &p,
588 * const unsigned int component) const override;
590 * virtual void vector_value(const Point<dim> &p,
591 * Vector<double> &values) const override;
595 * double BoundaryValues<dim>::value(const Point<dim> &p,
596 * const unsigned int component) const
598 * Assert(component < this->n_components,
599 * ExcIndexRange(component, 0, this->n_components));
600 * double left_boundary = (dim == 2 ? 0.3 : 0.0);
601 * if (component == 0 && std::abs(p[0] - left_boundary) < 1e-10)
605 * For a parabolic velocity profile, @f$U_\mathrm{avg} = 2/3
607 * in 2D, and @f$U_\mathrm{avg} = 4/9 U_\mathrm{max}@f$ in 3D.
608 * If @f$\nu = 0.001@f$, @f$D = 0.1@f$, then @f$Re = 100 U_\mathrm{avg}@f$.
612 * double Umax = (dim == 2 ? 3 * Uavg / 2 : 9 * Uavg / 4);
613 * double value = 4 * Umax * p[1] * (0.41 - p[1]) / (0.41 * 0.41);
616 * value *= 4 * p[2] * (0.41 - p[2]) / (0.41 * 0.41);
624 * void BoundaryValues<dim>::vector_value(const Point<dim> &p,
625 * Vector<double> &values) const
627 * for (unsigned int c = 0; c < this->n_components; ++c)
628 * values(c) = BoundaryValues<dim>::value(p, c);
634 * <a name="time_dependent_navier_stokes.cc-Blockpreconditioner"></a>
635 * <h3>Block preconditioner</h3>
639 * The block Schur preconditioner can be written as the product of three
642 * P^{-1} = \begin{pmatrix} \tilde{A}^{-1} & 0\\ 0 & I\end{pmatrix}
643 * \begin{pmatrix} I & -B^T\\ 0 & I\end{pmatrix}
644 * \begin{pmatrix} I & 0\\ 0 & \tilde{S}^{-1}\end{pmatrix}
646 * @f$\tilde{A}@f$ is symmetric since the convection term is eliminated from the
648 * @f$\tilde{S}^{-1}@f$ is the inverse of the Schur complement of @f$\tilde{A}@f$,
649 * which consists of a reaction term, a diffusion term, a Grad-Div term
650 * and a convection term.
651 * In practice, the convection contribution is ignored, namely
652 * @f$\tilde{S}^{-1} = -(\nu + \gamma)M_p^{-1} -
653 * \frac{1}{\Delta{t}}{[B(diag(M_u))^{-1}B^T]}^{-1}@f$
654 * where @f$M_p@f$ is the pressure mass, and
655 * @f${[B(diag(M_u))^{-1}B^T]}@f$ is an approximation to the Schur complement of
656 * (velocity) mass matrix @f$BM_u^{-1}B^T@f$.
660 * Same as the tutorials, we define a vmult operation for the block
662 * instead of writing it as a matrix. It can be seen from the above definition,
663 * the result of the vmult operation of the block preconditioner can be
665 * from the results of the vmult operations of @f$M_u^{-1}@f$, @f$M_p^{-1}@f$,
666 * @f$\tilde{A}^{-1}@f$, which can be transformed into solving three symmetric
671 * class BlockSchurPreconditioner : public Subscriptor
674 * BlockSchurPreconditioner(
675 * TimerOutput &timer,
679 * const std::vector<IndexSet> &owned_partitioning,
680 * const PETScWrappers::MPI::BlockSparseMatrix &system,
681 * const PETScWrappers::MPI::BlockSparseMatrix &mass,
682 * PETScWrappers::MPI::BlockSparseMatrix &schur);
684 * void vmult(PETScWrappers::MPI::BlockVector &dst,
685 * const PETScWrappers::MPI::BlockVector &src) const;
688 * TimerOutput &timer;
689 * const double gamma;
690 * const double viscosity;
693 * const SmartPointer<const PETScWrappers::MPI::BlockSparseMatrix>
695 * const SmartPointer<const PETScWrappers::MPI::BlockSparseMatrix> mass_matrix;
698 * As discussed, @f${[B(diag(M_u))^{-1}B^T]}@f$ and its inverse
699 * need to be computed.
700 * We can either explicitly compute it out as a matrix, or define
701 * it as a class with a vmult operation.
702 * The second approach saves some computation to construct the matrix,
703 * but leads to slow convergence in CG solver because it is impossible
704 * to apply a preconditioner. We go with the first route.
707 * const SmartPointer<PETScWrappers::MPI::BlockSparseMatrix> mass_schur;
713 * <a name="time_dependent_navier_stokes.cc-BlockSchurPreconditionerBlockSchurPreconditioner"></a>
714 * <h4>BlockSchurPreconditioner::BlockSchurPreconditioner</h4>
718 * Input parameters and system matrix, mass matrix as well as the mass schur
719 * matrix are needed in the preconditioner. In addition, we pass the
720 * partitioning information into this class because we need to create some
721 * temporary block vectors inside.
724 * BlockSchurPreconditioner::BlockSchurPreconditioner(
725 * TimerOutput &timer,
729 * const std::vector<IndexSet> &owned_partitioning,
730 * const PETScWrappers::MPI::BlockSparseMatrix &system,
731 * const PETScWrappers::MPI::BlockSparseMatrix &mass,
732 * PETScWrappers::MPI::BlockSparseMatrix &schur)
735 * viscosity(viscosity),
737 * system_matrix(&system),
738 * mass_matrix(&mass),
741 * TimerOutput::Scope timer_section(timer, "CG for Sm");
744 * The Schur complement of the mass matrix is actually being computed here.
747 * PETScWrappers::MPI::BlockVector tmp1, tmp2;
748 * tmp1.reinit(owned_partitioning, mass_matrix->get_mpi_communicator());
749 * tmp2.reinit(owned_partitioning, mass_matrix->get_mpi_communicator());
754 * Jacobi preconditioner of matrix A is by definition @f${diag(A)}^{-1}@f$,
755 * this is exactly what we want to compute.
758 * PETScWrappers::PreconditionJacobi jacobi(mass_matrix->block(0, 0));
759 * jacobi.vmult(tmp2.block(0), tmp1.block(0));
760 * system_matrix->block(1, 0).mmult(
761 * mass_schur->block(1, 1), system_matrix->block(0, 1), tmp2.block(0));
767 * <a name="time_dependent_navier_stokes.cc-BlockSchurPreconditionervmult"></a>
768 * <h4>BlockSchurPreconditioner::vmult</h4>
772 * The vmult operation strictly follows the definition of
773 * BlockSchurPreconditioner
774 * introduced above. Conceptually it computes @f$u = P^{-1}v@f$.
777 * void BlockSchurPreconditioner::vmult(
778 * PETScWrappers::MPI::BlockVector &dst,
779 * const PETScWrappers::MPI::BlockVector &src) const
786 * PETScWrappers::MPI::Vector utmp(src.block(0));
787 * PETScWrappers::MPI::Vector tmp(src.block(1));
791 * This block computes @f$u_1 = \tilde{S}^{-1} v_1@f$,
792 * where CG solvers are used for @f$M_p^{-1}@f$ and @f$S_m^{-1}@f$.
796 * TimerOutput::Scope timer_section(timer, "CG for Mp");
797 * SolverControl mp_control(src.block(1).size(),
798 * 1e-6 * src.block(1).l2_norm());
799 * PETScWrappers::SolverCG cg_mp(mp_control);
802 * @f$-(\nu + \gamma)M_p^{-1}v_1@f$
805 * PETScWrappers::PreconditionBlockJacobi Mp_preconditioner;
806 * Mp_preconditioner.initialize(mass_matrix->block(1, 1));
808 * mass_matrix->block(1, 1), tmp, src.block(1), Mp_preconditioner);
809 * tmp *= -(viscosity + gamma);
813 * @f$-\frac{1}{dt}S_m^{-1}v_1@f$
817 * TimerOutput::Scope timer_section(timer, "CG for Sm");
818 * SolverControl sm_control(src.block(1).size(),
819 * 1e-6 * src.block(1).l2_norm());
820 * PETScWrappers::SolverCG cg_sm(sm_control);
823 * PreconditionBlockJacobi works fine on Sm if we do not refine the mesh.
824 * Because after refine_mesh is called, zero entries will be created on
825 * the diagonal (not sure why), which prevents PreconditionBlockJacobi
829 * PETScWrappers::PreconditionNone Sm_preconditioner;
830 * Sm_preconditioner.initialize(mass_schur->block(1, 1));
832 * mass_schur->block(1, 1), dst.block(1), src.block(1), Sm_preconditioner);
833 * dst.block(1) *= -1 / dt;
837 * Adding up these two, we get @f$\tilde{S}^{-1}v_1@f$.
840 * dst.block(1) += tmp;
843 * Compute @f$v_0 - B^T\tilde{S}^{-1}v_1@f$ based on @f$u_1@f$.
846 * system_matrix->block(0, 1).vmult(utmp, dst.block(1));
848 * utmp += src.block(0);
851 * Finally, compute the product of @f$\tilde{A}^{-1}@f$ and utmp
852 * using another CG solver.
856 * TimerOutput::Scope timer_section(timer, "CG for A");
857 * SolverControl a_control(src.block(0).size(),
858 * 1e-6 * src.block(0).l2_norm());
859 * PETScWrappers::SolverCG cg_a(a_control);
862 * We do not use any preconditioner for this block, which is of course
864 * only because the performance of the only two preconditioners available
865 * PreconditionBlockJacobi and PreconditionBoomerAMG are even worse than
869 * PETScWrappers::PreconditionNone A_preconditioner;
870 * A_preconditioner.initialize(system_matrix->block(0, 0));
872 * system_matrix->block(0, 0), dst.block(0), utmp, A_preconditioner);
879 * <a name="time_dependent_navier_stokes.cc-TheincompressibleNavierStokessolver"></a>
880 * <h3>The incompressible Navier-Stokes solver</h3>
884 * Parallel incompressible Navier Stokes equation solver using
885 * implicit-explicit time scheme.
886 * This program is built upon dealii tutorials @ref step_57 "step-57", @ref step_40 "step-40", @ref step_22 "step-22",
887 * and @ref step_20 "step-20".
888 * The system equation is written in the incremental form, and we treat
889 * the convection term explicitly. Therefore the system equation is linear
890 * and symmetric, which does not need to be solved with Newton's iteration.
891 * The system is further stabilized and preconditioned with Grad-Div method,
892 * where GMRES solver is used as the outer solver.
901 * ~InsIMEX() { timer.print_summary(); }
905 *
void make_constraints();
906 *
void initialize_system();
907 *
void assemble(
bool use_nonzero_constraints,
bool assemble_system);
908 * std::pair<unsigned int, double> solve(
bool use_nonzero_constraints,
909 *
bool assemble_system);
910 *
void refine_mesh(
const unsigned int,
const unsigned int);
911 *
void output_results(
const unsigned int)
const;
914 *
const unsigned int degree;
915 * std::vector<types::global_dof_index> dofs_per_block;
921 *
QGauss<dim - 1> face_quad_formula;
929 * System
matrix to be solved
935 * Mass
matrix is a block
matrix which includes both velocity
942 * The schur complement of mass
matrix is not a block
matrix.
943 * However, because we want to reuse the
partition we created
944 *
for the system
matrix, it is defined as a block
matrix
945 * where only
one block is actually used.
951 * The latest known solution.
957 * The increment at a certain time step.
974 * The IndexSets of owned velocity and pressure respectively.
977 * std::vector<IndexSet> owned_partitioning;
981 * The IndexSets of relevant velocity and pressure respectively.
984 * std::vector<IndexSet> relevant_partitioning;
988 * The
IndexSet of all relevant dofs.
995 * The BlockSchurPreconditioner
for the entire system.
998 * std::shared_ptr<BlockSchurPreconditioner> preconditioner;
1007 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXInsIMEX"></a>
1008 * <h4>InsIMEX::InsIMEX</h4>
1011 *
template <
int dim>
1013 * : viscosity(0.001),
1016 * triangulation(tria),
1018 * dof_handler(triangulation),
1019 * volume_quad_formula(degree + 2),
1020 * face_quad_formula(degree + 2),
1021 * mpi_communicator(MPI_COMM_WORLD),
1023 * time(1e0, 1e-3, 1e-2, 1e-2),
1032 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXsetup_dofs"></a>
1033 * <h4>InsIMEX::setup_dofs</h4>
1036 *
template <
int dim>
1037 *
void InsIMEX<dim>::setup_dofs()
1041 * The first step is to associate DoFs with a given mesh.
1044 * dof_handler.distribute_dofs(fe);
1047 * We renumber the components to have all velocity DoFs come before
1048 * the pressure DoFs to be able to
split the solution vector in two blocks
1049 * which are separately accessed in the block preconditioner.
1053 * std::vector<unsigned int> block_component(dim + 1, 0);
1054 * block_component[dim] = 1;
1062 *
unsigned int dof_u = dofs_per_block[0];
1063 *
unsigned int dof_p = dofs_per_block[1];
1064 * owned_partitioning.resize(2);
1065 * owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, dof_u);
1066 * owned_partitioning[1] =
1067 * dof_handler.locally_owned_dofs().get_view(dof_u, dof_u + dof_p);
1069 * relevant_partitioning.resize(2);
1070 * relevant_partitioning[0] = locally_relevant_dofs.get_view(0, dof_u);
1071 * relevant_partitioning[1] =
1072 * locally_relevant_dofs.get_view(dof_u, dof_u + dof_p);
1073 * pcout <<
" Number of active fluid cells: "
1074 * << triangulation.n_global_active_cells() << std::endl
1075 * <<
" Number of degrees of freedom: " << dof_handler.n_dofs() <<
" ("
1076 * << dof_u <<
'+' << dof_p <<
')' << std::endl;
1082 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXmake_constraints"></a>
1083 * <h4>InsIMEX::make_constraints</h4>
1086 *
template <
int dim>
1087 *
void InsIMEX<dim>::make_constraints()
1091 * Because the equation is written in incremental form, two constraints
1092 * are needed:
nonzero constraint and
zero constraint.
1095 * nonzero_constraints.clear();
1096 * zero_constraints.clear();
1097 * nonzero_constraints.reinit(locally_relevant_dofs);
1098 * zero_constraints.reinit(locally_relevant_dofs);
1104 * Apply Dirichlet boundary conditions on all boundaries except
for the
1108 * std::vector<unsigned int> dirichlet_bc_ids;
1110 * dirichlet_bc_ids = std::vector<unsigned int>{0, 2, 3, 4};
1112 * dirichlet_bc_ids = std::vector<unsigned int>{0, 2, 3, 4, 5, 6};
1115 *
for (
auto id : dirichlet_bc_ids)
1119 * BoundaryValues<dim>(),
1120 * nonzero_constraints,
1121 * fe.component_mask(velocities));
1127 * fe.component_mask(velocities));
1129 * nonzero_constraints.close();
1130 * zero_constraints.close();
1136 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXinitialize_system"></a>
1137 * <h4>InsIMEX::initialize_system</h4>
1140 *
template <
int dim>
1141 *
void InsIMEX<dim>::initialize_system()
1143 * preconditioner.reset();
1144 * system_matrix.clear();
1146 * mass_schur.clear();
1150 * sparsity_pattern.copy_from(dsp);
1153 * dof_handler.locally_owned_dofs(),
1155 * locally_relevant_dofs);
1157 * system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
1158 *
mass_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
1162 * Only the @f$(1, 1)@f$ block in the mass schur
matrix is used.
1163 * Compute the sparsity pattern
for mass schur in
advance.
1164 * The only
nonzero block has the same sparsity pattern as @f$BB^
T@f$.
1168 * schur_dsp.block(1, 1).compute_mmult_pattern(sparsity_pattern.block(1, 0),
1169 * sparsity_pattern.block(0, 1));
1170 * mass_schur.reinit(owned_partitioning, schur_dsp, mpi_communicator);
1174 * present_solution is ghosted because it is used in the
1178 * present_solution.reinit(
1179 * owned_partitioning, relevant_partitioning, mpi_communicator);
1182 * solution_increment is non-ghosted because the linear solver needs
1183 * a completely distributed vector.
1186 * solution_increment.reinit(owned_partitioning, mpi_communicator);
1189 * system_rhs is non-ghosted because it is only used in the linear
1190 * solver and residual evaluation.
1193 * system_rhs.reinit(owned_partitioning, mpi_communicator);
1199 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXassemble"></a>
1200 * <h4>InsIMEX::assemble</h4>
1204 * Assemble the system
matrix, mass
matrix, and the RHS.
1205 * It can be used to
assemble the entire system or only the RHS.
1206 * An additional option is added to determine whether
nonzero
1207 * constraints or
zero constraints should be used.
1208 * Note that we only need to
assemble the LHS
twice: once with the
nonzero
1210 * and once
for zero constraint. But we must
assemble the RHS at every time
1214 *
template <
int dim>
1215 *
void InsIMEX<dim>::assemble(
bool use_nonzero_constraints,
1216 *
bool assemble_system)
1220 *
if (assemble_system)
1222 * system_matrix = 0;
1228 * volume_quad_formula,
1232 * face_quad_formula,
1237 *
const unsigned int dofs_per_cell = fe.dofs_per_cell;
1238 *
const unsigned int n_q_points = volume_quad_formula.size();
1247 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1249 * std::vector<Tensor<1, dim>> current_velocity_values(n_q_points);
1250 * std::vector<Tensor<2, dim>> current_velocity_gradients(n_q_points);
1251 * std::vector<double> current_velocity_divergences(n_q_points);
1252 * std::vector<double> current_pressure_values(n_q_points);
1254 * std::vector<double> div_phi_u(dofs_per_cell);
1255 * std::vector<Tensor<1, dim>> phi_u(dofs_per_cell);
1256 * std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
1257 * std::vector<double> phi_p(dofs_per_cell);
1259 *
for (
auto cell = dof_handler.begin_active(); cell != dof_handler.end();
1262 *
if (cell->is_locally_owned())
1264 * fe_values.reinit(cell);
1266 *
if (assemble_system)
1269 * local_mass_matrix = 0;
1273 * fe_values[velocities].get_function_values(present_solution,
1274 * current_velocity_values);
1276 * fe_values[velocities].get_function_gradients(
1277 * present_solution, current_velocity_gradients);
1279 * fe_values[velocities].get_function_divergences(
1280 * present_solution, current_velocity_divergences);
1282 * fe_values[pressure].get_function_values(present_solution,
1283 * current_pressure_values);
1287 * Assemble the system
matrix and mass
matrix simultaneously.
1288 * The mass
matrix only uses the @f$(0, 0)@f$ and @f$(1, 1)@f$ blocks.
1291 *
for (
unsigned int q = 0; q < n_q_points; ++q)
1293 *
for (
unsigned int k = 0; k < dofs_per_cell; ++k)
1295 * div_phi_u[k] = fe_values[velocities].divergence(k, q);
1296 * grad_phi_u[k] = fe_values[velocities].gradient(k, q);
1297 * phi_u[k] = fe_values[velocities].value(k, q);
1298 * phi_p[k] = fe_values[pressure].value(k, q);
1301 *
for (
unsigned int i = 0; i < dofs_per_cell; ++i)
1303 *
if (assemble_system)
1305 *
for (
unsigned int j = 0; j < dofs_per_cell; ++j)
1307 * local_matrix(i, j) +=
1310 * div_phi_u[i] * phi_p[j] -
1311 * phi_p[i] * div_phi_u[j] +
1312 * gamma * div_phi_u[j] * div_phi_u[i] +
1313 * phi_u[i] * phi_u[j] / time.get_delta_t()) *
1315 * local_mass_matrix(i, j) +=
1316 * (phi_u[i] * phi_u[j] + phi_p[i] * phi_p[j]) *
1323 * current_velocity_divergences[q] * phi_p[i] -
1324 * current_pressure_values[q] * div_phi_u[i] +
1325 * gamma * current_velocity_divergences[q] * div_phi_u[i] +
1326 * current_velocity_gradients[q] *
1327 * current_velocity_values[q] * phi_u[i]) *
1332 * cell->get_dof_indices(local_dof_indices);
1335 * use_nonzero_constraints ? nonzero_constraints : zero_constraints;
1336 *
if (assemble_system)
1338 * constraints_used.distribute_local_to_global(local_matrix,
1340 * local_dof_indices,
1343 * constraints_used.distribute_local_to_global(
1344 * local_mass_matrix, local_dof_indices, mass_matrix);
1348 * constraints_used.distribute_local_to_global(
1349 * local_rhs, local_dof_indices, system_rhs);
1354 *
if (assemble_system)
1365 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXsolve"></a>
1366 * <h4>InsIMEX::solve</h4>
1367 * Solve the linear system
using FGMRES solver with block preconditioner.
1369 * in assembly must be used again, to set the constrained
value.
1370 * The second argument is used to determine whether the block
1371 * preconditioner should be reset or not.
1374 *
template <
int dim>
1375 * std::pair<unsigned int, double>
1376 * InsIMEX<dim>::solve(
bool use_nonzero_constraints,
bool assemble_system)
1378 *
if (assemble_system)
1380 * preconditioner.reset(
new BlockSchurPreconditioner(timer,
1383 * time.get_delta_t(),
1384 * owned_partitioning,
1391 * system_matrix.m(), 1e-8 * system_rhs.l2_norm(),
true);
1405 * The solution vector must be non-ghosted
1408 * gmres.solve(system_matrix, solution_increment, system_rhs, *preconditioner);
1411 * use_nonzero_constraints ? nonzero_constraints : zero_constraints;
1412 * constraints_used.
distribute(solution_increment);
1414 *
return {solver_control.last_step(), solver_control.last_value()};
1420 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXrun"></a>
1421 * <h4>InsIMEX::run</h4>
1424 *
template <
int dim>
1425 *
void InsIMEX<dim>::run()
1427 * pcout <<
"Running with PETSc on "
1429 * <<
" MPI rank(s)..." << std::endl;
1431 * triangulation.refine_global(0);
1433 * make_constraints();
1434 * initialize_system();
1441 *
bool refined =
false;
1442 *
while (time.end() - time.current() > 1e-12)
1444 *
if (time.get_timestep() == 0)
1446 * output_results(0);
1449 * std::cout.precision(6);
1450 * std::cout.width(12);
1451 * pcout << std::string(96,
'*') << std::endl
1452 * <<
"Time step = " << time.get_timestep()
1453 * <<
", at t = " << std::scientific << time.current() << std::endl;
1459 * solution_increment = 0;
1462 * Only use
nonzero constraints at the very first time step
1465 *
bool apply_nonzero_constraints = (time.get_timestep() == 1);
1469 * once
using nonzero_constraints, once
using zero_constraints,
1470 * as well as the steps immediately after mesh refinement.
1473 *
bool assemble_system = (time.get_timestep() < 3 || refined);
1475 *
assemble(apply_nonzero_constraints, assemble_system);
1476 *
auto state = solve(apply_nonzero_constraints, assemble_system);
1479 * Note we have to use a non-ghosted vector to
do the addition.
1483 * tmp.
reinit(owned_partitioning, mpi_communicator);
1484 * tmp = present_solution;
1485 * tmp += solution_increment;
1486 * present_solution = tmp;
1487 * pcout << std::scientific << std::left <<
" GMRES_ITR = " << std::setw(3)
1488 * << state.first <<
" GMRES_RES = " << state.second << std::endl;
1494 *
if (time.time_to_output())
1496 * output_results(time.get_timestep());
1498 *
if (time.time_to_refine())
1500 * refine_mesh(0, 4);
1509 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXoutput_result"></a>
1510 * <h4>InsIMEX::output_result</h4>
1516 *
template <
int dim>
1517 *
void InsIMEX<dim>::output_results(
const unsigned int output_index)
const
1520 * pcout <<
"Writing results..." << std::endl;
1521 * std::vector<std::string> solution_names(dim,
"velocity");
1522 * solution_names.push_back(
"pressure");
1524 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1525 * data_component_interpretation(
1527 * data_component_interpretation.push_back(
1533 * vector to be output must be ghosted
1536 * data_out.add_data_vector(present_solution,
1539 * data_component_interpretation);
1547 *
for (
unsigned int i = 0; i < subdomain.size(); ++i)
1549 * subdomain(i) = triangulation.locally_owned_subdomain();
1551 * data_out.add_data_vector(subdomain,
"subdomain");
1553 * data_out.build_patches(degree + 1);
1555 * std::string basename =
1558 * std::string filename =
1563 * std::ofstream output(filename);
1564 * data_out.write_vtu(output);
1566 *
static std::vector<std::pair<double, std::string>> times_and_names;
1569 *
for (
unsigned int i = 0;
1573 * times_and_names.push_back(
1577 * std::ofstream pvd_output(
"navierstokes.pvd");
1585 * <a name=
"time_dependent_navier_stokes.cc-InsIMEXrefine_mesh"></a>
1586 * <h4>InsIMEX::refine_mesh</h4>
1592 *
template <
int dim>
1593 *
void InsIMEX<dim>::refine_mesh(
const unsigned int min_grid_level,
1594 *
const unsigned int max_grid_level)
1597 * pcout <<
"Refining mesh..." << std::endl;
1599 *
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1602 * face_quad_formula,
1605 * estimated_error_per_cell,
1606 * fe.component_mask(velocity));
1608 * triangulation, estimated_error_per_cell, 0.6, 0.4);
1609 *
if (triangulation.n_levels() > max_grid_level)
1611 *
for (
auto cell = triangulation.begin_active(max_grid_level);
1612 * cell != triangulation.end();
1615 * cell->clear_refine_flag();
1618 *
for (
auto cell = triangulation.begin_active(min_grid_level);
1619 * cell != triangulation.end_active(min_grid_level);
1622 * cell->clear_coarsen_flag();
1627 * Prepare to transfer
1632 * trans(dof_handler);
1634 * triangulation.prepare_coarsening_and_refinement();
1636 * trans.prepare_for_coarsening_and_refinement(present_solution);
1643 * triangulation.execute_coarsening_and_refinement();
1647 * Reinitialize the system
1651 * make_constraints();
1652 * initialize_system();
1657 * Need a non-ghosted vector
for interpolation
1662 * trans.interpolate(tmp);
1663 * present_solution = tmp;
1670 * <a name=
"time_dependent_navier_stokes.cc-mainfunction"></a>
1671 * <h3>main function</h3>
1677 *
int main(
int argc,
char *argv[])
1681 *
using namespace dealii;
1682 *
using namespace fluid;
1687 * InsIMEX<2> flow(tria);
1690 *
catch (std::exception &exc)
1692 * std::cerr << std::endl
1694 * <<
"----------------------------------------------------"
1696 * std::cerr <<
"Exception on processing: " << std::endl
1697 * << exc.what() << std::endl
1698 * <<
"Aborting!" << std::endl
1699 * <<
"----------------------------------------------------"
1705 * std::cerr << std::endl
1707 * <<
"----------------------------------------------------"
1709 * std::cerr <<
"Unknown exception!" << std::endl
1710 * <<
"Aborting!" << std::endl
1711 * <<
"----------------------------------------------------"
void distribute(VectorType &vec) const
void attach_dof_handler(const DoFHandler< dim, spacedim > &)
static void estimate(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Quadrature< dim - 1 > &quadrature, const std::map< types::boundary_id, const Function< spacedim, Number > * > &neumann_bc, const ReadVector< Number > &solution, Vector< float > &error, const ComponentMask &component_mask={}, const Function< spacedim > *coefficients=nullptr, const unsigned int n_threads=numbers::invalid_unsigned_int, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id, const types::material_id material_id=numbers::invalid_material_id, const Strategy strategy=cell_diameter_over_24)
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
const bool IsBlockVector< VectorType >::value
void make_hanging_node_constraints(const DoFHandler< dim, spacedim > &dof_handler, AffineConstraints< number > &constraints)
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints={}, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
@ update_values
Shape function values.
@ update_normal_vectors
Normal vectors.
@ update_JxW_values
Transformed quadrature weights.
@ update_gradients
Shape function gradients.
@ update_quadrature_points
Transformed quadrature points.
std::vector< value_type > split(const typename ::Triangulation< dim, spacedim >::cell_iterator &parent, const value_type parent_value)
@ component_is_part_of_vector
void write_pvd_record(std::ostream &out, const std::vector< std::pair< double, std::string > > &times_and_names)
void component_wise(DoFHandler< dim, spacedim > &dof_handler, const std::vector< unsigned int > &target_component=std::vector< unsigned int >())
void Cuthill_McKee(DoFHandler< dim, spacedim > &dof_handler, const bool reversed_numbering=false, const bool use_constraints=false, const std::vector< types::global_dof_index > &starting_indices=std::vector< types::global_dof_index >())
void create_triangulation(Triangulation< dim, dim > &tria, const AdditionalData &additional_data=AdditionalData())
@ matrix
Contents is actually a matrix.
constexpr types::blas_int zero
constexpr types::blas_int one
void mass_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const double factor=1.)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
long double gamma(const unsigned int n)
int(& functions)(const void *v1, const void *v2)
void assemble(const MeshWorker::DoFInfoBox< dim, DOFINFO > &dinfo, A *assembler)
void refine_and_coarsen_fixed_fraction(::Triangulation< dim, spacedim > &tria, const ::Vector< Number > &criteria, const double top_fraction_of_error, const double bottom_fraction_of_error, const VectorTools::NormType norm_type=VectorTools::L1_norm)
::SolutionTransfer< dim, VectorType, spacedim > SolutionTransfer
void advance(std::tuple< I1, I2 > &t, const unsigned int n)
constexpr ProductType< Number, OtherNumber >::type scalar_product(const Tensor< rank, dim, Number > &left, const Tensor< rank, dim, OtherNumber > &right)