deal.II version 9.7.0
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
Loading...
Searching...
No Matches
coupled_laplace_problem.h
Go to the documentation of this file.
1
168 *  
169 * @endcode
170 *
171 * The included deal.II header files are the same as in the other example
172 * programs:
173 *
174 * @code
175 *   #include <deal.II/base/function.h>
176 *   #include <deal.II/base/logstream.h>
177 *   #include <deal.II/base/quadrature_lib.h>
178 *  
179 *   #include <deal.II/dofs/dof_accessor.h>
180 *   #include <deal.II/dofs/dof_handler.h>
181 *   #include <deal.II/dofs/dof_tools.h>
182 *  
183 *   #include <deal.II/fe/fe_q.h>
184 *   #include <deal.II/fe/fe_values.h>
185 *  
186 *   #include <deal.II/grid/grid_generator.h>
187 *   #include <deal.II/grid/tria.h>
188 *   #include <deal.II/grid/tria_accessor.h>
189 *   #include <deal.II/grid/tria_iterator.h>
190 *  
191 *   #include <deal.II/lac/dynamic_sparsity_pattern.h>
192 *   #include <deal.II/lac/full_matrix.h>
193 *   #include <deal.II/lac/precondition.h>
194 *   #include <deal.II/lac/solver_cg.h>
195 *   #include <deal.II/lac/sparse_matrix.h>
196 *   #include <deal.II/lac/vector.h>
197 *  
198 *   #include <deal.II/numerics/data_out.h>
199 *   #include <deal.II/numerics/matrix_tools.h>
200 *   #include <deal.II/numerics/vector_tools.h>
201 * @endcode
202 *
203 * In addition to the deal.II header files, we include the preCICE API in order
204 * to obtain access to preCICE specific functionality
205 *
206 * @code
207 *   #include <precice/precice.hpp>
208 *  
209 *   #include <fstream>
210 *   #include <iostream>
211 *  
212 *   using namespace dealii;
213 *  
214 * @endcode
215 *
216 * Configuration parameters
217 *
218
219 *
220 * We set up a simple hard-coded struct containing all names we need for
221 * external coupling. The struct includes the name of the preCICE
222 * configuration file as well as the name of the simulation participant, the
223 * name of the coupling mesh and the name of the exchanged data. The last three
224 * names you also find in the preCICE configuration file. For real application
225 * cases, these names are better handled by a parameter file.
226 *
227 * @code
228 *   struct CouplingParamters
229 *   {
230 *   const std::string config_file = "precice-config.xml";
231 *   const std::string participant_name = "laplace-solver";
232 *   const std::string mesh_name = "dealii-mesh";
233 *   const std::string read_data_name = "boundary-data";
234 *   };
235 *  
236 *  
237 * @endcode
238 *
239 * The Adapter class
240 *
241
242 *
243 * The Adapter class handles all functionalities to couple the deal.II solver
244 * code to other solvers with preCICE, i.e., data structures are set up and all
245 * relevant information is passed to preCICE.
246 *
247
248 *
249 *
250 * @code
251 *   template <int dim, typename ParameterClass>
252 *   class Adapter
253 *   {
254 *   public:
255 *   Adapter(const ParameterClass & parameters,
256 *   const types::boundary_id dealii_boundary_interface_id);
257 *  
258 *   void
259 *   initialize(const DoFHandler<dim> & dof_handler,
260 *   std::map<types::global_dof_index, double> &boundary_data,
261 *   const MappingQGeneric<dim> & mapping);
262 *  
263 *   void
264 *   read_data(double relative_read_time,
265 *   std::map<types::global_dof_index, double> &boundary_data);
266 *  
267 *   void
268 *   advance(const double computed_timestep_length);
269 *  
270 * @endcode
271 *
272 * public preCICE solver interface
273 *
274 * @code
275 *   precice::Participant precice;
276 *  
277 * @endcode
278 *
279 * Boundary ID of the deal.II triangulation, associated with the coupling
280 * interface. The variable is defined in the constructor of this class and
281 * intentionally public so that it can be used during the grid generation and
282 * system assembly. The only thing, one needs to make sure is that this ID is
283 * unique for a particular triangulation.
284 *
285 * @code
286 *   const unsigned int dealii_boundary_interface_id;
287 *  
288 *   private:
289 * @endcode
290 *
291 * preCICE related initializations
292 * These variables are specified in and read from a parameter file, which is
293 * in this simple tutorial program the CouplingParamters struct already
294 * introduced in the beginning.
295 *
296 * @code
297 *   const std::string mesh_name;
298 *   const std::string read_data_name;
299 *  
300 * @endcode
301 *
302 * The node IDs are filled by preCICE during the initialization and associated to
303 * the spherical vertices we pass to preCICE
304 *
305 * @code
306 *   int n_interface_nodes;
307 *  
308 * @endcode
309 *
310 * DoF IndexSet, containing relevant coupling DoF indices at the coupling
311 * boundary
312 *
313 * @code
314 *   IndexSet coupling_dofs;
315 *  
316 * @endcode
317 *
318 * Data containers which are passed to preCICE in an appropriate preCICE
319 * specific format
320 *
321 * @code
322 *   std::vector<int> interface_nodes_ids;
323 *   std::vector<double> read_data_buffer;
324 *  
325 * @endcode
326 *
327 * The MPI rank and total number of MPI ranks is required by preCICE when the
328 * Participant is created. Since this tutorial runs only in serial mode we
329 * define the variables manually in this class instead of using the regular
330 * MPI interface.
331 *
332 * @code
333 *   static constexpr int this_mpi_process = 0;
334 *   static constexpr int n_mpi_processes = 1;
335 *  
336 * @endcode
337 *
338 * Function to transform the obtained data from preCICE into an appropriate
339 * map for Dirichlet boundary conditions
340 *
341 * @code
342 *   void
343 *   format_precice_to_dealii(
344 *   std::map<types::global_dof_index, double> &boundary_data) const;
345 *   };
346 *  
347 *  
348 *  
349 * @endcode
350 *
351 * In the constructor of the Adapter class, we set up the preCICE
352 * Participant. We need to tell preCICE our name as participant of the
353 * simulation and the name of the preCICE configuration file. Both have already
354 * been specified in the CouplingParamters class above. Thus, we pass the class
355 * directly to the constructor and read out all relevant information. As a
356 * second parameter, we need to specify the boundary ID of our triangulation,
357 * which is associated with the coupling interface.
358 *
359 * @code
360 *   template <int dim, typename ParameterClass>
361 *   Adapter<dim, ParameterClass>::Adapter(
362 *   const ParameterClass & parameters,
363 *   const types::boundary_id deal_boundary_interface_id)
364 *   : precice(parameters.participant_name,
365 *   parameters.config_file,
366 *   this_mpi_process,
367 *   n_mpi_processes)
368 *   , dealii_boundary_interface_id(deal_boundary_interface_id)
369 *   , mesh_name(parameters.mesh_name)
370 *   , read_data_name(parameters.read_data_name)
371 *   {}
372 *  
373 *  
374 *  
375 * @endcode
376 *
377 * This function initializes preCICE (e.g. establishes communication channels
378 * and allocates memory) and passes all relevant data to preCICE. For surface
379 * coupling, relevant data is in particular the location of the data points at
380 * the associated interface(s). The `boundary_data` is an empty map, which is
381 * filled by preCICE, i.e., information of the other participant. Throughout
382 * the system assembly, the map can directly be used in order to apply the
383 * Dirichlet boundary conditions in the linear system.
384 *
385 * @code
386 *   template <int dim, typename ParameterClass>
387 *   void
388 *   Adapter<dim, ParameterClass>::initialize(
389 *   const DoFHandler<dim> & dof_handler,
390 *   std::map<types::global_dof_index, double> &boundary_data,
391 *   const MappingQGeneric<dim> & mapping)
392 *   {
393 *   Assert(dim > 1, ExcNotImplemented());
394 *   AssertDimension(dim, precice.getMeshDimensions(mesh_name));
395 *  
396 * @endcode
397 *
398 * Afterwards, we extract the number of interface nodes and the coupling DoFs
399 * at the coupling interface from our deal.II solver via
401 * `DoFTools::extract_boundary_dofs()`.
402 * @code
403 *   std::set<types::boundary_id> couplingBoundary;
404 *   couplingBoundary.insert(dealii_boundary_interface_id);
405 *  
406 * @endcode
407 *
408 * The `ComponentMask()` might be important in case we deal with vector valued
409 * problems, because vector valued problems have a DoF for each component.
410 *
411 * @code
412 *   coupling_dofs = DoFTools::extract_boundary_dofs(dof_handler,
413 *   ComponentMask(),
414 *   couplingBoundary);
415 *  
416 * @endcode
417 *
418 * The coupling DoFs are used to set up the `boundary_data` map. At the end,
419 * we associate here each DoF with a respective boundary value.
420 *
421 * @code
422 *   for (const auto i : coupling_dofs)
423 *   boundary_data[i] = 0.0;
424 *  
425 * @endcode
426 *
427 * Since we deal with a scalar problem, the number of DoFs at the particular
428 * interface corresponds to the number of interface nodes.
429 *
430 * @code
431 *   n_interface_nodes = coupling_dofs.n_elements();
432 *  
433 *   std::cout << "\t Number of coupling nodes: " << n_interface_nodes
434 *   << std::endl;
435 *  
436 * @endcode
437 *
438 * Now, we need to tell preCICE the coordinates of the interface nodes. Hence,
439 * we set up a std::vector to pass the node positions to preCICE. Each node is
440 * specified only once.
441 *
442 * @code
443 *   std::vector<double> interface_nodes_positions;
444 *   interface_nodes_positions.reserve(dim * n_interface_nodes);
445 *  
446 * @endcode
447 *
448 * Set up the appropriate size of the data container needed for data
449 * exchange. Here, we deal with a scalar problem, so that only a scalar value
450 * is read/written per interface node.
451 *
452 * @code
453 *   read_data_buffer.resize(n_interface_nodes);
454 * @endcode
455 *
456 * The IDs are filled by preCICE during the initializations.
457 *
458 * @code
459 *   interface_nodes_ids.resize(n_interface_nodes);
460 *  
461 * @endcode
462 *
463 * The node location is obtained using `map_dofs_to_support_points()`.
464 *
465 * @code
466 *   std::map<types::global_dof_index, Point<dim>> support_points;
467 *   DoFTools::map_dofs_to_support_points(mapping, dof_handler, support_points);
468 *  
469 * @endcode
470 *
471 * `support_points` contains now the coordinates of all DoFs. In the next
472 * step, the relevant coordinates are extracted using the IndexSet with the
473 * extracted coupling_dofs.
474 *
475 * @code
476 *   for (const auto element : coupling_dofs)
477 *   for (int i = 0; i < dim; ++i)
478 *   interface_nodes_positions.push_back(support_points[element][i]);
479 *  
480 * @endcode
481 *
482 * Now we have all information to define the coupling mesh and pass the
483 * information to preCICE.
484 *
485 * @code
486 *   precice.setMeshVertices(mesh_name,
487 *   interface_nodes_positions,
488 *   interface_nodes_ids);
489 *  
490 * @endcode
491 *
492 * Then, we initialize preCICE internally calling the API function
493 * `initialize()`
494 *
495 * @code
496 *   precice.initialize();
497 *   }
498 *  
499 *   template <int dim, typename ParameterClass>
500 *   void
501 *   Adapter<dim, ParameterClass>::read_data(
502 *   double relative_read_time,
503 *   std::map<types::global_dof_index, double> &boundary_data)
504 *   {
505 * @endcode
506 *
507 * here, we obtain data, i.e. the boundary condition, from another
508 * participant. We have already vertex IDs and just need to convert our
509 * obtained data to the deal.II compatible 'boundary map', which is done in
510 * the format_precice_to_dealii function.
511 *
512 * @code
513 *   precice.readData(mesh_name,
514 *   read_data_name,
515 *   interface_nodes_ids,
516 *   relative_read_time,
517 *   read_data_buffer);
518 *  
519 * @endcode
520 *
521 * After receiving the coupling data in `read_data_buffer`, we convert it to
522 * the std::map `boundary_data` which is later needed in order to apply
523 * Dirichlet boundary conditions
524 *
525 * @code
526 *   format_precice_to_dealii(boundary_data);
527 *   }
528 *  
529 *  
530 * @endcode
531 *
532 * The function `advance()` is called in the main time loop after the
533 * computation in each time step. Here, preCICE exchanges the coupling data
534 * internally and computes mappings as well as acceleration methods.
535 *
536 * @code
537 *   template <int dim, typename ParameterClass>
538 *   void
539 *   Adapter<dim, ParameterClass>::advance(const double computed_timestep_length)
540 *   {
541 * @endcode
542 *
543 * We specify the computed time-step length and pass it to preCICE.
544 *
545 * @code
546 *   precice.advance(computed_timestep_length);
547 *   }
548 *  
549 *  
550 *  
551 * @endcode
552 *
553 * This function takes the std::vector obtained by preCICE in `read_data_buffer` and
554 * inserts the values to the right position in the boundary map used throughout
555 * our deal.II solver for Dirichlet boundary conditions. The function is only
556 * used internally in the Adapter class and not called in the solver itself. The
557 * order, in which preCICE sorts the data in the `read_data_buffer` vector is exactly
558 * the same as the order of the initially passed vertices coordinates.
559 *
560 * @code
561 *   template <int dim, typename ParameterClass>
562 *   void
563 *   Adapter<dim, ParameterClass>::format_precice_to_dealii(
564 *   std::map<types::global_dof_index, double> &boundary_data) const
565 *   {
566 * @endcode
567 *
568 * We already stored the coupling DoF indices in the `boundary_data` map, so
569 * that we can simply iterate over all keys in the map.
570 *
571 * @code
572 *   auto dof_component = boundary_data.begin();
573 *   for (int i = 0; i < n_interface_nodes; ++i)
574 *   {
575 *   AssertIndexRange(i, read_data_buffer.size());
576 *   boundary_data[dof_component->first] = read_data_buffer[i];
577 *   ++dof_component;
578 *   }
579 *   }
580 *  
581 *  
582 * @endcode
583 *
584 * The solver class is essentially the same as in @ref step_4 "step-4". We only extend the
585 * stationary problem to a time-dependent problem and introduced the coupling.
586 * Comments are added at any point, where the workflow differs from @ref step_4 "step-4".
587 *
588 * @code
589 *   template <int dim>
590 *   class CoupledLaplaceProblem
591 *   {
592 *   public:
593 *   CoupledLaplaceProblem();
594 *  
595 *   void
596 *   run();
597 *  
598 *   private:
599 *   void
600 *   make_grid();
601 *   void
602 *   setup_system();
603 *   void
604 *   assemble_system();
605 *   void
606 *   solve();
607 *   void
608 *   output_results() const;
609 *  
610 *   Triangulation<dim> triangulation;
611 *   FE_Q<dim> fe;
612 *   DoFHandler<dim> dof_handler;
614 *  
615 *   SparsityPattern sparsity_pattern;
616 *   SparseMatrix<double> system_matrix;
617 *  
618 *   Vector<double> solution;
619 *   Vector<double> old_solution;
620 *   Vector<double> system_rhs;
621 *  
622 * @endcode
623 *
624 * We allocate all structures required for the preCICE coupling: The map
625 * is used to apply Dirichlet boundary conditions and filled in the Adapter
626 * class with data from the other participant. The CouplingParamters hold the
627 * preCICE configuration as described above. The interface boundary ID is the
628 * ID associated to our coupling interface and needs to be specified, when we
629 * set up the Adapter class object, because we pass it directly to the
630 * Constructor of this class.
631 *
632 * @code
633 *   std::map<types::global_dof_index, double> boundary_data;
634 *   CouplingParamters parameters;
635 *   const types::boundary_id interface_boundary_id;
636 *   Adapter<dim, CouplingParamters> adapter;
637 *  
638 * @endcode
639 *
640 * The time-step size delta_t is the actual time-step size used for all
641 * computations. The preCICE time-step size is obtained by preCICE in order to
642 * ensure a synchronization at all coupling time steps. The solver time
643 * step-size is the desired time-step size of our individual solver. In more
644 * sophisticated computations, it might be determined adaptively. The
645 * `time_step` counter is just used for the time-step number.
646 *
647 * @code
648 *   double delta_t;
649 *   double precice_delta_t;
650 *   const double solver_delta_t = 0.1;
651 *   unsigned int time_step = 0;
652 *   };
653 *  
654 *  
655 *  
656 *   template <int dim>
657 *   class RightHandSide : public Function<dim>
658 *   {
659 *   public:
660 *   virtual double
661 *   value(const Point<dim> &p, const unsigned int component = 0) const override;
662 *   };
663 *  
664 *  
665 *  
666 *   template <int dim>
667 *   class BoundaryValues : public Function<dim>
668 *   {
669 *   public:
670 *   virtual double
671 *   value(const Point<dim> &p, const unsigned int component = 0) const override;
672 *   };
673 *  
674 *   template <int dim>
675 *   double
676 *   RightHandSide<dim>::value(const Point<dim> &p,
677 *   const unsigned int /*component*/) const
678 *   {
679 *   double return_value = 0.0;
680 *   for (unsigned int i = 0; i < dim; ++i)
681 *   return_value += 4.0 * std::pow(p(i), 4.0);
682 *  
683 *   return return_value;
684 *   }
685 *  
686 *  
687 *   template <int dim>
688 *   double
689 *   BoundaryValues<dim>::value(const Point<dim> &p,
690 *   const unsigned int /*component*/) const
691 *   {
692 *   return p.square();
693 *   }
694 *  
695 *  
696 *  
697 *   template <int dim>
698 *   CoupledLaplaceProblem<dim>::CoupledLaplaceProblem()
699 *   : fe(1)
700 *   , dof_handler(triangulation)
701 *   , interface_boundary_id(1)
702 *   , adapter(parameters, interface_boundary_id)
703 *   {}
704 *  
705 *  
706 *   template <int dim>
707 *   void
708 *   CoupledLaplaceProblem<dim>::make_grid()
709 *   {
710 *   GridGenerator::hyper_cube(triangulation, -1, 1);
711 *   triangulation.refine_global(4);
712 *  
713 *   for (const auto &cell : triangulation.active_cell_iterators())
714 *   for (const auto &face : cell->face_iterators())
715 *   {
716 * @endcode
717 *
718 * We choose the boundary in positive x direction for the
719 * interface coupling.
720 *
721 * @code
722 *   if (face->at_boundary() && (face->center()[0] == 1))
723 *   face->set_boundary_id(interface_boundary_id);
724 *   }
725 *  
726 *   std::cout << " Number of active cells: " << triangulation.n_active_cells()
727 *   << std::endl
728 *   << " Total number of cells: " << triangulation.n_cells()
729 *   << std::endl;
730 *   }
731 *  
732 *  
733 *   template <int dim>
734 *   void
735 *   CoupledLaplaceProblem<dim>::setup_system()
736 *   {
737 *   dof_handler.distribute_dofs(fe);
738 *  
739 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
740 *   << std::endl;
741 *  
742 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
743 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
744 *   sparsity_pattern.copy_from(dsp);
745 *  
746 *   system_matrix.reinit(sparsity_pattern);
747 *  
748 *   solution.reinit(dof_handler.n_dofs());
749 *   old_solution.reinit(dof_handler.n_dofs());
750 *   system_rhs.reinit(dof_handler.n_dofs());
751 *   }
752 *  
753 *  
754 *  
755 *   template <int dim>
756 *   void
757 *   CoupledLaplaceProblem<dim>::assemble_system()
758 *   {
759 * @endcode
760 *
761 * Reset global structures
762 *
763 * @code
764 *   system_rhs = 0;
765 *   system_matrix = 0;
766 * @endcode
767 *
768 * Update old solution values
769 *
770 * @code
771 *   old_solution = solution;
772 *  
773 *   QGauss<dim> quadrature_formula(fe.degree + 1);
774 *  
775 *   RightHandSide<dim> right_hand_side;
776 *  
777 *   FEValues<dim> fe_values(fe,
778 *   quadrature_formula,
781 *  
782 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
783 *  
784 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
785 *   Vector<double> cell_rhs(dofs_per_cell);
786 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
787 * @endcode
788 *
789 * The solution values from previous time steps are stored for each quadrature
790 * point
791 *
792 * @code
793 *   std::vector<double> local_values_old_solution(fe_values.n_quadrature_points);
794 *  
795 *   for (const auto &cell : dof_handler.active_cell_iterators())
796 *   {
797 *   fe_values.reinit(cell);
798 *   cell_matrix = 0;
799 *   cell_rhs = 0;
800 * @endcode
801 *
802 * Get the local values from the `fe_values' object
803 *
804 * @code
805 *   fe_values.get_function_values(old_solution, local_values_old_solution);
806 *  
807 * @endcode
808 *
809 * The system matrix contains additionally a mass matrix due to the time
810 * discretization. The RHS has contributions from the old solution values.
811 *
812 * @code
813 *   for (const unsigned int q_index : fe_values.quadrature_point_indices())
814 *   for (const unsigned int i : fe_values.dof_indices())
815 *   {
816 *   for (const unsigned int j : fe_values.dof_indices())
817 *   cell_matrix(i, j) +=
818 *   ((fe_values.shape_value(i, q_index) * // phi_i(x_q)
819 *   fe_values.shape_value(j, q_index)) + // phi_j(x_q)
820 *   (delta_t * // delta t
821 *   fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
822 *   fe_values.shape_grad(j, q_index))) * // grad phi_j(x_q)
823 *   fe_values.JxW(q_index); // dx
824 *  
825 *   const auto x_q = fe_values.quadrature_point(q_index);
826 *   const auto &local_value = local_values_old_solution[q_index];
827 *   cell_rhs(i) += ((delta_t * // delta t
828 *   fe_values.shape_value(i, q_index) * // phi_i(x_q)
829 *   right_hand_side.value(x_q)) + // f(x_q)
830 *   fe_values.shape_value(i, q_index) *
831 *   local_value) * // phi_i(x_q)*val
832 *   fe_values.JxW(q_index); // dx
833 *   }
834 *  
835 * @endcode
836 *
837 * Copy local to global
838 *
839 * @code
840 *   cell->get_dof_indices(local_dof_indices);
841 *   for (const unsigned int i : fe_values.dof_indices())
842 *   {
843 *   for (const unsigned int j : fe_values.dof_indices())
844 *   system_matrix.add(local_dof_indices[i],
845 *   local_dof_indices[j],
846 *   cell_matrix(i, j));
847 *  
848 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
849 *   }
850 *   }
851 *   {
852 * @endcode
853 *
854 * At first, we apply the Dirichlet boundary condition from @ref step_4 "step-4", as
855 * usual.
856 *
857 * @code
858 *   std::map<types::global_dof_index, double> boundary_values;
859 *   VectorTools::interpolate_boundary_values(dof_handler,
860 *   0,
861 *   BoundaryValues<dim>(),
862 *   boundary_values);
863 *   MatrixTools::apply_boundary_values(boundary_values,
864 *   system_matrix,
865 *   solution,
866 *   system_rhs);
867 *   }
868 *   {
869 * @endcode
870 *
871 * Afterwards, we apply the coupling boundary condition. The `boundary_data`
872 * has already been filled by preCICE.
873 *
874 * @code
875 *   MatrixTools::apply_boundary_values(boundary_data,
876 *   system_matrix,
877 *   solution,
878 *   system_rhs);
879 *   }
880 *   }
881 *  
882 *  
883 *  
884 *   template <int dim>
885 *   void
886 *   CoupledLaplaceProblem<dim>::solve()
887 *   {
888 *   SolverControl solver_control(1000, 1e-12);
889 *   SolverCG<Vector<double>> solver(solver_control);
890 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
891 *  
892 *   std::cout << " " << solver_control.last_step()
893 *   << " CG iterations needed to obtain convergence." << std::endl;
894 *   }
895 *  
896 *  
897 *  
898 *   template <int dim>
899 *   void
900 *   CoupledLaplaceProblem<dim>::output_results() const
901 *   {
902 *   DataOut<dim> data_out;
903 *  
904 *   data_out.attach_dof_handler(dof_handler);
905 *   data_out.add_data_vector(solution, "solution");
906 *  
907 *   data_out.build_patches(mapping);
908 *  
909 *   std::ofstream output("solution-" + std::to_string(time_step) + ".vtk");
910 *   data_out.write_vtk(output);
911 *   }
912 *  
913 *  
914 *  
915 *   template <int dim>
916 *   void
917 *   CoupledLaplaceProblem<dim>::run()
918 *   {
919 *   std::cout << "Solving problem in " << dim << " space dimensions."
920 *   << std::endl;
921 *  
922 *   make_grid();
923 *   setup_system();
924 *  
925 * @endcode
926 *
927 * After we set up the system, we initialize preCICE and the adapter using the
928 * functionalities of the Adapter.
929 *
930 * @code
931 *   adapter.initialize(dof_handler, boundary_data, mapping);
932 *  
933 * @endcode
934 *
935 * preCICE steers the coupled simulation: `isCouplingOngoing` is
936 * used to synchronize the end of the simulation with the coupling partner
937 *
938 * @code
939 *   while (adapter.precice.isCouplingOngoing())
940 *   {
941 * @endcode
942 *
943 * The time step number is solely used to generate unique output files
944 *
945 * @code
946 *   ++time_step;
947 * @endcode
948 *
949 * preCICE returns the maximum admissible time-step size,
950 * which needs to be compared to our desired solver time-step size.
951 *
952 * @code
953 *   precice_delta_t = adapter.precice.getMaxTimeStepSize();
954 *   delta_t = std::min(precice_delta_t, solver_delta_t);
955 * @endcode
956 *
957 * Next we read data. Since we use a fully backward Euler method, we want
958 * the data to be associated to the end of the current time-step (delta_t)
959 * Time-interpolation methods in preCICE allow to get readData at any
960 * point in time, if the coupling scheme allows it
961 *
962 * @code
963 *   adapter.read_data(delta_t, boundary_data);
964 *  
965 * @endcode
966 *
967 * In the time loop, we assemble the coupled system and solve it as
968 * usual.
969 *
970 * @code
971 *   assemble_system();
972 *   solve();
973 *  
974 * @endcode
975 *
976 * After we solved the system, we advance the coupling to the next time
977 * level. In a bi-directional coupled simulation, we would pass our
978 * calculated data to preCICE
979 *
980 * @code
981 *   adapter.advance(delta_t);
982 *  
983 * @endcode
984 *
985 * Write an output file if the time step is completed. In case of an
986 * implicit coupling, where individual time steps are computed more than
987 * once, the function `isTimeWindowComplete` prevents unnecessary result
988 * writing. For this simple tutorial configuration (explicit coupling),
989 * the function returns always `true`.
990 *
991 * @code
992 *   if (adapter.precice.isTimeWindowComplete())
993 *   output_results();
994 *   }
995 *   }
996 *  
997 *  
998 *  
999 *   int
1000 *   main()
1001 *   {
1002 *   CoupledLaplaceProblem<2> laplace_problem;
1003 *   laplace_problem.run();
1004 *  
1005 *   return 0;
1006 *   }
1007 * @endcode
1008
1009
1010<a name="ann-fancy_boundary_condition.cc"></a>
1011<h1>Annotated version of fancy_boundary_condition.cc</h1>
1012 *
1013 *
1014 *
1015 *
1016 * @code
1017 *   /* -----------------------------------------------------------------------------
1018 *   *
1019 *   * SPDX-License-Identifier: LGPL-2.1-or-later
1020 *   * Copyright (C) 2020 by David Schneider
1021 *   * Copyright (C) 2020 by Benjamin Uekermann
1022 *   *
1023 *   * This file is part of the deal.II code gallery.
1024 *   *
1025 *   * -----------------------------------------------------------------------------
1026 *   */
1027 *  
1028 * @endcode
1029 *
1030 * This program does not use any deal.II functionality and depends only on
1031 * preCICE and the standard libraries.
1032 *
1033 * @code
1034 *   #include <precice/precice.hpp>
1035 *  
1036 *   #include <iostream>
1037 *   #include <sstream>
1038 *   #include <vector>
1039 *  
1040 * @endcode
1041 *
1042 * The program computes a time-varying parabolic boundary condition, which is
1043 * passed to preCICE and serves as Dirichlet boundary condition for the other
1044 * coupling participant.
1045 *
1046
1047 *
1048 * Function to generate boundary values in each time step
1049 *
1050 * @code
1051 *   void
1052 *   define_boundary_values(std::vector<double> &boundary_data,
1053 *   const double time,
1054 *   const double end_time)
1055 *   {
1056 * @endcode
1057 *
1058 * Scale the current time value
1059 *
1060 * @code
1061 *   const double relative_time = time / end_time;
1062 * @endcode
1063 *
1064 * Define the amplitude. Values run from -0.5 to 0.5
1065 *
1066 * @code
1067 *   const double amplitude = (relative_time - 0.5);
1068 * @endcode
1069 *
1070 * Specify the actual data we want to pass to the other participant. Here, we
1071 * choose a parabola with boundary values of 2 in order to enforce continuity
1072 * to adjacent boundaries.
1073 *
1074 * @code
1075 *   const double n_elements = boundary_data.size();
1076 *   const double right_zero = boundary_data.size() - 1;
1077 *   const double left_zero = 0;
1078 *   const double offset = 2;
1079 *   for (uint i = 0; i < n_elements; ++i)
1080 *   boundary_data[i] =
1081 *   -amplitude * ((i - left_zero) * (i - right_zero)) + offset;
1082 *   }
1083 *  
1084 *  
1085 *   int
1086 *   main()
1087 *   {
1088 *   std::cout << "Boundary participant: starting... \n";
1089 *  
1090 * @endcode
1091 *
1092 * Configuration
1093 *
1094 * @code
1095 *   const std::string configFileName("precice-config.xml");
1096 *   const std::string solverName("boundary-participant");
1097 *   const std::string meshName("boundary-mesh");
1098 *   const std::string dataWriteName("boundary-data");
1099 *  
1100 * @endcode
1101 *
1102 * Adjust to MPI rank and size for parallel computation
1103 *
1104 * @code
1105 *   const int commRank = 0;
1106 *   const int commSize = 1;
1107 *  
1108 *   precice::Participant precice(solverName, configFileName, commRank, commSize);
1109 *  
1110 *   const int dimensions = precice.getMeshDimensions(meshName);
1111 *   const int numberOfVertices = 6;
1112 *  
1113 * @endcode
1114 *
1115 * Set up data structures
1116 *
1117 * @code
1118 *   std::vector<double> writeData(numberOfVertices);
1119 *   std::vector<int> vertexIDs(numberOfVertices);
1120 *   std::vector<double> vertices(numberOfVertices * dimensions);
1121 *  
1122 * @endcode
1123 *
1124 * Define a boundary mesh
1125 *
1126 * @code
1127 *   std::cout << "Boundary participant: defining boundary mesh \n";
1128 *   const double length = 2;
1129 *   const double xCoord = 1;
1130 *   const double deltaY = length / (numberOfVertices - 1);
1131 *   for (int i = 0; i < numberOfVertices; ++i)
1132 *   for (int j = 0; j < dimensions; ++j)
1133 *   {
1134 *   const unsigned int index = dimensions * i + j;
1135 * @endcode
1136 *
1137 * The x-coordinate is always 1, i.e., the boundary is parallel to the
1138 * y-axis. The y-coordinate is descending from 1 to -1.
1139 *
1140 * @code
1141 *   if (j == 0)
1142 *   vertices[index] = xCoord;
1143 *   else
1144 *   vertices[index] = 1 - deltaY * i;
1145 *   }
1146 *  
1147 * @endcode
1148 *
1149 * Pass the vertices to preCICE
1150 *
1151 * @code
1152 *   precice.setMeshVertices(meshName, vertices, vertexIDs);
1153 *  
1154 * @endcode
1155 *
1156 * Variables for the time
1157 *
1158 * @code
1159 *   const double end_time = 1;
1160 *   double time = 0;
1161 *  
1162 * @endcode
1163 *
1164 * Not used in the configuration by default
1165 *
1166 * @code
1167 *   if (precice.requiresInitialData())
1168 *   {
1169 *   std::cout << "Boundary participant: writing initial data \n";
1170 *   define_boundary_values(writeData, time, end_time);
1171 *   precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1172 *   }
1173 *  
1174 * @endcode
1175 *
1176 * initialize the Participant
1177 *
1178 * @code
1179 *   precice.initialize();
1180 *  
1181 * @endcode
1182 *
1183 * Start time loop
1184 *
1185 * @code
1186 *   while (precice.isCouplingOngoing())
1187 *   {
1188 *   double dt = precice.getMaxTimeStepSize();
1189 *   time += dt;
1190 *  
1191 * @endcode
1192 *
1193 * Generate new boundary data
1194 *
1195 * @code
1196 *   define_boundary_values(writeData, time, end_time);
1197 *  
1198 *   std::cout << "Boundary participant: writing coupling data \n";
1199 *   precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1200 *  
1201 *   std::cout << "Boundary participant: advancing in time\n";
1202 *   precice.advance(dt);
1203 *   }
1204 *  
1205 *   std::cout << "Boundary participant: closing...\n";
1206 *  
1207 *   return 0;
1208 *   }
1209 * @endcode
1210
1211
1212*/
Definition fe_q.h:554
Definition point.h:113
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
#define AssertIndexRange(index, range)
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
Definition loop.h:564
const bool IsBlockVector< VectorType >::value
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints={}, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
@ update_values
Shape function values.
@ update_JxW_values
Transformed quadrature weights.
@ update_gradients
Shape function gradients.
@ update_quadrature_points
Transformed quadrature points.
MappingQ< dim, spacedim > MappingQGeneric
Definition mapping_q.h:683
MappingQ< dim, spacedim > StaticMappingQ1< dim, spacedim >::mapping
Definition mapping_q1.h:104
IndexSet extract_boundary_dofs(const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask={}, const std::set< types::boundary_id > &boundary_ids={})
Definition dof_tools.cc:616
void map_dofs_to_support_points(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::vector< Point< spacedim > > &support_points, const ComponentMask &mask={})
void hyper_cube(Triangulation< dim, spacedim > &tria, const double left=0., const double right=1., const bool colorize=false)
constexpr types::blas_int one
void cell_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const FEValuesBase< dim > &fetest, const ArrayView< const std::vector< double > > &velocity, const double factor=1.)
Definition advection.h:74
Point< spacedim > point(const gp_Pnt &p, const double tolerance=1e-10)
Definition utilities.cc:193
SymmetricTensor< 2, dim, Number > e(const Tensor< 2, dim, Number > &F)
void apply(const Kokkos::TeamPolicy< MemorySpace::Default::kokkos_space::execution_space >::member_type &team_member, const Kokkos::View< Number *, MemorySpace::Default::kokkos_space > shape_data, const ViewTypeIn in, ViewTypeOut out)
constexpr ReturnType< rank, T >::value_type & extract(T &t, const ArrayType &indices)
VectorType::value_type * end(VectorType &V)
std::vector< unsigned int > serial(const std::vector< unsigned int > &targets, const std::function< RequestType(const unsigned int)> &create_request, const std::function< AnswerType(const unsigned int, const RequestType &)> &answer_request, const std::function< void(const unsigned int, const AnswerType &)> &process_answer, const MPI_Comm comm)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
Definition mpi.cc:105
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
Definition mpi.cc:120
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
void transform(const InputIterator &begin_in, const InputIterator &end_in, OutputIterator out, const Function &function, const unsigned int grainsize)
Definition parallel.h:204
::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
unsigned int boundary_id
Definition types.h:161
void advance(std::tuple< I1, I2 > &t, const unsigned int n)