version 3.7
multidomain/fvassembler.hh
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3//
4// SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
5// SPDX-License-Identifier: GPL-3.0-or-later
6//
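7/*!
8 * \file
9 * \ingroup MultiDomain
10 * \ingroup Assembly
11 * \brief A linear system assembler (residual and Jacobian) for finite volume
12 *        schemes (box, tpfa, ...) with multiple domains
13 */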
14#ifndef DUMUX_MULTIDOMAIN_FV_ASSEMBLER_HH
15#define DUMUX_MULTIDOMAIN_FV_ASSEMBLER_HH
16
17#include <type_traits>
18#include <tuple>
19
20#include <dune/common/hybridutilities.hh>
21#include <dune/istl/matrixindexset.hh>
22
23#include <dumux/common/exceptions.hh>
24#include <dumux/common/properties.hh>
25#include <dumux/common/timeloop.hh>
26#include <dumux/common/typetraits/utility.hh>
27#include <dumux/common/gridcapabilities.hh>
28#include <dumux/discretization/method.hh>
29#include <dumux/assembly/diffmethod.hh>
30#include <dumux/assembly/jacobianpattern.hh>
31#include <dumux/linear/parallelhelpers.hh>
32#include <dumux/parallel/multithreading.hh>
33
34#include "couplingjacobianpattern.hh"
35#include "subdomaincclocalassembler.hh"
36#include "subdomaincvfelocalassembler.hh"
37#include "subdomainstaggeredlocalassembler.hh"
38#include "subdomainfclocalassembler.hh"
39
41
42namespace Dumux {
43
44namespace Grid::Capabilities {
45
46namespace Detail {
47// helper for multi-domain models
48template<class T, std::size_t... I>
49bool allGridsSupportsMultithreadingImpl(const T& gridGeometries, std::index_sequence<I...>)
50{
51 return (... && supportsMultithreading(std::get<I>(gridGeometries)->gridView()));
52}
53} // end namespace Detail
54
55// helper for multi-domain models (all grids have to support multithreading)
56template<class... GG>
57bool allGridsSupportsMultithreading(const std::tuple<GG...>& gridGeometries)
58{
59 return Detail::allGridsSupportsMultithreadingImpl<std::tuple<GG...>>(gridGeometries, std::make_index_sequence<sizeof...(GG)>());
60}
61
62} // end namespace Grid::Capabilities
63
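64/*!
65 * \ingroup MultiDomain
66 * \brief Trait that is specialized for coupling managers supporting multithreaded assembly.
67 *        Specialize this to std::true_type for your coupling manager to enable
68 *        multithreaded assembly. */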
69template<class CM>
70struct CouplingManagerSupportsMultithreadedAssembly : public std::false_type
71{};
72
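73/*!
74 * \ingroup MultiDomain
75 * \brief A linear system assembler (residual and Jacobian) for finite volume schemes
76 *        (box, tpfa, ...) with multiple domains
77 * \tparam MDTraits the multidomain traits
78 * \tparam CMType the coupling manager type
79 * \tparam diffMethod the differentiation method to compute derivatives
80 * \tparam useImplicitAssembly whether to use an implicit or explicit time discretization
81 */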
82template<class MDTraits, class CMType, DiffMethod diffMethod, bool useImplicitAssembly = true>
83class MultiDomainFVAssembler
84{
85 template<std::size_t id>
86 using SubDomainTypeTag = typename MDTraits::template SubDomain<id>::TypeTag;
87
88public:
89 using Traits = MDTraits;
90
91 using Scalar = typename MDTraits::Scalar;
92
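93 //! TODO get rid of this GetPropType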
94 template<std::size_t id>
95 using LocalResidual = GetPropType<SubDomainTypeTag<id>, Properties::LocalResidual>;
96
97 template<std::size_t id>
98 using GridVariables = typename MDTraits::template SubDomain<id>::GridVariables;
99
100 template<std::size_t id>
101 using GridGeometry = typename MDTraits::template SubDomain<id>::GridGeometry;
102
103 template<std::size_t id>
104 using Problem = typename MDTraits::template SubDomain<id>::Problem;
105
106 using JacobianMatrix = typename MDTraits::JacobianMatrix;
107 using SolutionVector = typename MDTraits::SolutionVector;
108 using ResidualType = typename MDTraits::ResidualVector;
109
110 using CouplingManager = CMType;
111
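112 /*!
113  * \brief Returns true if the assembler considers implicit assembly.
114  */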
115 static constexpr bool isImplicit()
116 { return useImplicitAssembly; }
117
118private:
119
120 using ProblemTuple = typename MDTraits::template TupleOfSharedPtrConst<Problem>;
121 using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst<GridGeometry>;
122 using GridVariablesTuple = typename MDTraits::template TupleOfSharedPtr<GridVariables>;
123
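124 //! the type of this assembler (passed to the sub-domain local assemblers)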
125 using ThisType = MultiDomainFVAssembler<MDTraits, CouplingManager, diffMethod, isImplicit()>;
126
127 template<class DiscretizationMethod, std::size_t id>
128 struct SubDomainAssemblerType;
129
130 template<std::size_t id>
131 struct SubDomainAssemblerType<DiscretizationMethods::CCTpfa, id>
132 {
133 using type = SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
134 };
135
136 template<std::size_t id>
137 struct SubDomainAssemblerType<DiscretizationMethods::CCMpfa, id>
138 {
139 using type = SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
140 };
141
142 template<std::size_t id, class DM>
143 struct SubDomainAssemblerType<DiscretizationMethods::CVFE<DM>, id>
144 {
145 using type = SubDomainCVFELocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
146 };
147
148 template<std::size_t id>
149 struct SubDomainAssemblerType<DiscretizationMethods::Staggered, id>
150 {
151 using type = SubDomainStaggeredLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
152 };
153
154 template<std::size_t id>
155 struct SubDomainAssemblerType<DiscretizationMethods::FCStaggered, id>
156 {
157 using type = SubDomainFaceCenteredLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
158 };
159
160 template<std::size_t id>
161 using SubDomainAssembler = typename SubDomainAssemblerType<typename GridGeometry<id>::DiscretizationMethod, id>::type;
162
163public:
164
165
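166 /*!
167  * \brief The constructor for stationary problems
168  * \note the grid variables might temporarily be changed during assembly (if caching is enabled)
169  *       it is, however, guaranteed that the state after assembly is the same as before
170  */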
171 MultiDomainFVAssembler(ProblemTuple problem,
172 GridGeometryTuple gridGeometry,
173 GridVariablesTuple gridVariables,
174 std::shared_ptr<CouplingManager> couplingManager)
175 : couplingManager_(couplingManager)
176 , problemTuple_(std::move(problem))
177 , gridGeometryTuple_(std::move(gridGeometry))
178 , gridVariablesTuple_(std::move(gridVariables))
179 , timeLoop_()
180 , isStationaryProblem_(true)
181 , warningIssued_(false)
182 {
183 static_assert(isImplicit(), "Explicit assembler for stationary problem doesn't make sense!");
184 std::cout << "Instantiated assembler for a stationary problem." << std::endl;
185
186 enableMultithreading_ = CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value
187 && Grid::Capabilities::allGridsSupportsMultithreading(gridGeometryTuple_)
188 && !Multithreading::isSerial()
189 && getParam<bool>("Assembly.Multithreading", true);
190
191 maybeComputeColors_();
192 }
193
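194 /*!
195  * \brief The constructor for instationary problems
196  * \note the grid variables might temporarily be changed during assembly (if caching is enabled)
197  *       it is, however, guaranteed that the state after assembly is the same as before
198  */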
199 MultiDomainFVAssembler(ProblemTuple problem,
200 GridGeometryTuple gridGeometry,
201 GridVariablesTuple gridVariables,
202 std::shared_ptr<CouplingManager> couplingManager,
203 std::shared_ptr<const TimeLoop> timeLoop,
204 const SolutionVector& prevSol)
205 : couplingManager_(couplingManager)
206 , problemTuple_(std::move(problem))
207 , gridGeometryTuple_(std::move(gridGeometry))
208 , gridVariablesTuple_(std::move(gridVariables))
209 , timeLoop_(timeLoop)
210 , prevSol_(&prevSol)
211 , isStationaryProblem_(false)
212 , warningIssued_(false)
213 {
214 std::cout << "Instantiated assembler for an instationary problem." << std::endl;
215
216 enableMultithreading_ = CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value
217 && Grid::Capabilities::allGridsSupportsMultithreading(gridGeometryTuple_)
218 && !Multithreading::isSerial()
219 && getParam<bool>("Assembly.Multithreading", true);
220
221 maybeComputeColors_();
222 }
223
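224 /*!
225  * \brief Assembles the global Jacobian of the residual
226  *        and the residual for the current solution.
227  */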
228 void assembleJacobianAndResidual(const SolutionVector& curSol)
229 {
230 checkAssemblerState_();
231 resetJacobian_();
232 resetResidual_();
233
234 using namespace Dune::Hybrid;
235 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
236 {
237 auto& jacRow = (*jacobian_)[domainId];
238 auto& subRes = (*residual_)[domainId];
239 this->assembleJacobianAndResidual_(domainId, jacRow, subRes, curSol);
240
241 const auto gridGeometry = std::get<domainId>(gridGeometryTuple_);
242 enforcePeriodicConstraints_(domainId, jacRow, subRes, *gridGeometry, curSol[domainId]);
243 });
244 }
245
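246 //! compute the residuals using the internal residual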
247 void assembleResidual(const SolutionVector& curSol)
248 {
249 resetResidual_();
250 assembleResidual(*residual_, curSol);
251 }
252
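253 //! assemble a residual r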
254 void assembleResidual(ResidualType& r, const SolutionVector& curSol)
255 {
256 r = 0.0;
257
258 checkAssemblerState_();
259
260 // update the grid variables for the case of active caching
261 updateGridVariables(curSol);
262
263 using namespace Dune::Hybrid;
264 forEach(integralRange(Dune::Hybrid::size(r)), [&](const auto domainId)
265 {
266 auto& subRes = r[domainId];
267 this->assembleResidual_(domainId, subRes, curSol);
268 });
269 }
270
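271 //! compute a residual's vector norm (this is a temporary interface introduced during the deprecation period)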
272 [[deprecated("Use the linear solver's norm. Will be deleted after 3.7")]]
273 Scalar normOfResidual(ResidualType& residual) const
274 {
275 // calculate the squared norm of the residual
276 Scalar resultSquared = 0.0;
277
278 // for the box scheme, communicate the residual with the neighboring processes
279 using namespace Dune::Hybrid;
280 forEach(integralRange(Dune::Hybrid::size(residual)), [&](const auto domainId)
281 {
282 const auto gridGeometry = std::get<domainId>(gridGeometryTuple_);
283 const auto& gridView = gridGeometry->gridView();
284
285 if (gridView.comm().size() > 1 && gridView.overlapSize(0) == 0)
286 {
287 if constexpr (GridGeometry<domainId>::discMethod == DiscretizationMethods::box)
288 {
289 using GV = typename GridGeometry<domainId>::GridView;
290 using DM = typename GridGeometry<domainId>::VertexMapper;
291 using PVHelper = ParallelVectorHelper<GV, DM, GV::dimension>;
292
293 PVHelper vectorHelper(gridView, gridGeometry->vertexMapper());
294
295 vectorHelper.makeNonOverlappingConsistent(residual[domainId]);
296 }
297 }
298 else if (!warningIssued_)
299 {
300 if (gridView.comm().size() > 1 && gridView.comm().rank() == 0)
301 std::cout << "\nWarning: norm calculation adds entries corresponding to\n"
302 << "overlapping entities multiple times. Please use the norm\n"
303 << "function provided by a linear solver instead." << std::endl;
304
305 warningIssued_ = true;
306 }
307
308 Scalar localNormSquared = residual[domainId].two_norm2();
309
310 if (gridView.comm().size() > 1)
311 {
312 localNormSquared = gridView.comm().sum(localNormSquared);
313 }
314
315 resultSquared += localNormSquared;
316 });
317
318 using std::sqrt;
319 return sqrt(resultSquared);
320 }
321
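322 //! compute the residual and return its vector norm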
323 [[deprecated("Use norm(curSol) provided by the linear solver class instead. Will be deleted after 3.7")]]
324 Scalar residualNorm(const SolutionVector& curSol)
325 {
326 ResidualType residual;
327 setResidualSize_(residual);
328 assembleResidual(residual, curSol);
329 return normOfResidual(residual);
330 }
331
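332 /*!
333  * \brief Tells the assembler which jacobian and residual to use.
334  *        This also resizes the containers to the required sizes and sets the
335  *        sparsity pattern of the jacobian matrix.
336  */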
337 void setLinearSystem(std::shared_ptr<JacobianMatrix> A,
338 std::shared_ptr<ResidualType> r)
339 {
340 jacobian_ = A;
341 residual_ = r;
342
343 setJacobianBuildMode(*jacobian_);
344 setJacobianPattern_(*jacobian_);
345 setResidualSize_(*residual_);
346 }
347
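348 /*!
349  * \brief The version without arguments uses the default constructor to create
350  *        the jacobian and residual objects in this assembler if you don't need them outside this class
351  */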
352 void setLinearSystem()
353 {
354 jacobian_ = std::make_shared<JacobianMatrix>();
355 residual_ = std::make_shared<ResidualType>();
356
357 setJacobianBuildMode(*jacobian_);
358 setJacobianPattern_(*jacobian_);
359 setResidualSize_(*residual_);
360 }
361
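362 /*!
363  * \brief Sets the jacobian build mode
364  */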
365 void setJacobianBuildMode(JacobianMatrix& jac) const
366 {
367 using namespace Dune::Hybrid;
368 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto i)
369 {
370 forEach(jac[i], [&](auto& jacBlock)
371 {
372 using BlockType = std::decay_t<decltype(jacBlock)>;
373 if (jacBlock.buildMode() == BlockType::BuildMode::unknown)
374 jacBlock.setBuildMode(BlockType::BuildMode::random);
375 else if (jacBlock.buildMode() != BlockType::BuildMode::random)
376 DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
377 });
378 });
379 }
380
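381 /*!
382  * \brief Resizes jacobian and residual and recomputes colors
383  */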
384 void updateAfterGridAdaption()
385 {
386 setResidualSize_(*residual_);
387 setJacobianPattern_(*jacobian_);
388 maybeComputeColors_();
389 }
390
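391 /*!
392  * \brief Updates the grid variables with the given solution
393  */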
394 void updateGridVariables(const SolutionVector& curSol)
395 {
396 using namespace Dune::Hybrid;
397 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
398 { this->gridVariables(domainId).update(curSol[domainId]); });
399 }
400
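401 /*!
402  * \brief Resets the grid variables to the last time step
403  */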
404 void resetTimeStep(const SolutionVector& curSol)
405 {
406 using namespace Dune::Hybrid;
407 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
408 { this->gridVariables(domainId).resetTimeStep(curSol[domainId]); });
409 }
410
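411 //! the number of dof locations of domain i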
412 template<std::size_t i>
413 std::size_t numDofs(Dune::index_constant<i> domainId) const
414 { return std::get<domainId>(gridGeometryTuple_)->numDofs(); }
415
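416 //! the problem of domain i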
417 template<std::size_t i>
418 const auto& problem(Dune::index_constant<i> domainId) const
419 { return *std::get<domainId>(problemTuple_); }
420
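421 //! the finite volume grid geometry of domain i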
422 template<std::size_t i>
423 const auto& gridGeometry(Dune::index_constant<i> domainId) const
424 { return *std::get<domainId>(gridGeometryTuple_); }
425
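426 //! the grid view of domain i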
427 template<std::size_t i>
428 const auto& gridView(Dune::index_constant<i> domainId) const
429 { return gridGeometry(domainId).gridView(); }
430
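431 //! the grid variables of domain i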
432 template<std::size_t i>
433 GridVariables<i>& gridVariables(Dune::index_constant<i> domainId)
434 { return *std::get<domainId>(gridVariablesTuple_); }
435
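436 //! the grid variables of domain i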
437 template<std::size_t i>
438 const GridVariables<i>& gridVariables(Dune::index_constant<i> domainId) const
439 { return *std::get<domainId>(gridVariablesTuple_); }
440
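441 //! the coupling manager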
442 const CouplingManager& couplingManager() const
443 { return *couplingManager_; }
444
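445 //! the full Jacobian matrix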
446 JacobianMatrix& jacobian()
447 { return *jacobian_; }
448
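449 //! the full residual vector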
450 ResidualType& residual()
451 { return *residual_; }
452
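453 //! the solution of the previous time step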
454 const SolutionVector& prevSol() const
455 { return *prevSol_; }
456
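457 /*!
458  * \brief Set time loop for instationary problems
459  * \note passing a non-null time loop makes the problem instationary (a nullptr makes it stationary again)
460  */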
461 void setTimeManager(std::shared_ptr<const TimeLoop> timeLoop)
462 { timeLoop_ = timeLoop; isStationaryProblem_ = !(static_cast<bool>(timeLoop)); }
463
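464 /*!
465  * \brief Sets the solution from which to start the time integration.
466  *        Has to be called prior to assembly for time-dependent problems.
467  */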
468 void setPreviousSolution(const SolutionVector& u)
469 { prevSol_ = &u; }
470
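471 /*!
472  * \brief Whether we are assembling a stationary or instationary problem
473  */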
474 bool isStationaryProblem() const
475 { return isStationaryProblem_; }
476
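477 /*!
478  * \brief Create a local residual object (used by the local assembler)
479  */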
480 template<std::size_t i>
481 LocalResidual<i> localResidual(Dune::index_constant<i> domainId) const
482 { return LocalResidual<i>(std::get<domainId>(problemTuple_).get(), timeLoop_.get()); }
483
484protected:
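485 //! the coupling manager coupling the sub domains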
486 std::shared_ptr<CouplingManager> couplingManager_;
487
488private:
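489 /*!
490  * \brief Sets the jacobian sparsity pattern.
491  */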
492 void setJacobianPattern_(JacobianMatrix& jac) const
493 {
494 using namespace Dune::Hybrid;
495 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainI)
496 {
497 forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ)
498 {
499 const auto pattern = this->getJacobianPattern_(domainI, domainJ);
500 pattern.exportIdx(jac[domainI][domainJ]);
501 });
502 });
503 }
504
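505 /*!
506  * \brief Resizes the residual
507  */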
508 void setResidualSize_(ResidualType& res) const
509 {
510 using namespace Dune::Hybrid;
511 forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId)
512 { res[domainId].resize(this->numDofs(domainId)); });
513 }
514
515 // reset the residual vector to 0.0
516 void resetResidual_()
517 {
518 if (!residual_)
519 {
520 residual_ = std::make_shared<ResidualType>();
521 setResidualSize_(*residual_);
522 }
523
524 (*residual_) = 0.0;
525 }
526
527 // reset the jacobian matrix to 0.0
528 void resetJacobian_()
529 {
530 if (!jacobian_)
531 {
532 jacobian_ = std::make_shared<JacobianMatrix>();
533 setJacobianBuildMode(*jacobian_);
534 setJacobianPattern_(*jacobian_);
535 }
536
537 (*jacobian_) = 0.0;
538 }
539
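540 //! computes the colors for multithreaded assembly (if supported by the coupling manager)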
541 void maybeComputeColors_()
542 {
543 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
544 if (enableMultithreading_)
545 couplingManager_->computeColorsForAssembly();
546 }
547
548 // check if the assembler is in a correct state for assembly
549 void checkAssemblerState_() const
550 {
551 if (!isStationaryProblem_ && !prevSol_)
552 DUNE_THROW(Dune::InvalidStateException, "Assembling instationary problem but previous solution was not set!");
553
554 if (isStationaryProblem_ && prevSol_)
555 DUNE_THROW(Dune::InvalidStateException, "Assembling stationary problem but a previous solution was set."
556 << " Did you forget to set the timeLoop to make this problem instationary?");
557 }
558
559 template<std::size_t i, class JacRow, class SubRes>
560 void assembleJacobianAndResidual_(Dune::index_constant<i> domainId, JacRow& jacRow, SubRes& subRes,
561 const SolutionVector& curSol)
562 {
563 assemble_(domainId, [&](const auto& element)
564 {
565 SubDomainAssembler<i> subDomainAssembler(*this, element, curSol, *couplingManager_);
566 subDomainAssembler.assembleJacobianAndResidual(jacRow, subRes, gridVariablesTuple_);
567 });
568 }
569
570 template<std::size_t i, class SubRes>
571 void assembleResidual_(Dune::index_constant<i> domainId, SubRes& subRes,
572 const SolutionVector& curSol)
573 {
574 assemble_(domainId, [&](const auto& element)
575 {
576 SubDomainAssembler<i> subDomainAssembler(*this, element, curSol, *couplingManager_);
577 subDomainAssembler.assembleResidual(subRes);
578 });
579 }
580
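581 /*!
582  * \brief A method assembling something per element
583  * \note Handles exceptions for parallel runs
584  * \throws NumericalProblem on all processes if an exception is thrown during assembly
585  */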
586 template<std::size_t i, class AssembleElementFunc>
587 void assemble_(Dune::index_constant<i> domainId, AssembleElementFunc&& assembleElement) const
588 {
589 // a state that will be checked on all processes
590 bool succeeded = false;
591
592 // try assembling using the local assembly function
593 try
594 {
595 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
596 {
597 if (enableMultithreading_)
598 {
599 couplingManager_->assembleMultithreaded(
600 domainId, std::forward<AssembleElementFunc>(assembleElement)
601 );
602 return;
603 }
604 }
605
606 // fallback for coupling managers that don't support multithreaded assembly (yet)
607 // or if multithreaded assembly is disabled
608 // let the local assembler add the element contributions
609 for (const auto& element : elements(gridView(domainId)))
610 assembleElement(element);
611
612 // if we get here, everything worked well on this process
613 succeeded = true;
614 }
615 // on a numerical problem, report it and mark this process as failed
616 catch (NumericalProblem &e)
617 {
618 std::cout << "rank " << gridView(domainId).comm().rank()
619 << " caught an exception while assembling:" << e.what()
620 << "\n";
621 succeeded = false;
622 }
623
624 // make sure everything worked well on all processes
625 if (gridView(domainId).comm().size() > 1)
626 succeeded = gridView(domainId).comm().min(succeeded);
627
628 // if any process failed, rethrow the error on all processes
629 if (!succeeded)
630 DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
631 }
632
633 // get diagonal block pattern
634 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i==j), int> = 0>
635 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
636 Dune::index_constant<j> domainJ) const
637 {
638 const auto& gg = gridGeometry(domainI);
639 auto pattern = getJacobianPattern<isImplicit()>(gg);
640 couplingManager_->extendJacobianPattern(domainI, pattern);
641 return pattern;
642 }
643
644 // get coupling block pattern
645 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i!=j), int> = 0>
646 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
647 Dune::index_constant<j> domainJ) const
648 {
649 return getCouplingJacobianPattern<isImplicit()>(*couplingManager_,
650 domainI, gridGeometry(domainI),
651 domainJ, gridGeometry(domainJ));
652 }
653
654 // build periodic constraints into the system matrix
655 template<std::size_t i, class JacRow, class Res, class GG, class Sol>
656 void enforcePeriodicConstraints_(Dune::index_constant<i> domainI, JacRow& jacRow, Res& res, const GG& gridGeometry, const Sol& curSol)
657 {
658 if constexpr (GG::discMethod == DiscretizationMethods::box || GG::discMethod == DiscretizationMethods::fcstaggered)
659 {
660 for (const auto& m : gridGeometry.periodicVertexMap())
661 {
662 if (m.first < m.second)
663 {
664 auto& jac = jacRow[domainI];
665
666 // add the second row to the first
667 res[m.first] += res[m.second];
668
669 // enforce the solution of the first periodic DOF to the second one
670 res[m.second] = curSol[m.second] - curSol[m.first];
671
672 const auto end = jac[m.second].end();
673 for (auto it = jac[m.second].begin(); it != end; ++it)
674 jac[m.first][it.index()] += (*it);
675
676 // enforce constraint in second row
677 for (auto it = jac[m.second].begin(); it != end; ++it)
678 (*it) = it.index() == m.second ? 1.0 : it.index() == m.first ? -1.0 : 0.0;
679
680 using namespace Dune::Hybrid;
681 forEach(makeIncompleteIntegerSequence<JacRow::size(), domainI>(), [&](const auto couplingDomainId)
682 {
683 auto& jacCoupling = jacRow[couplingDomainId];
684
685 for (auto it = jacCoupling[m.second].begin(); it != jacCoupling[m.second].end(); ++it)
686 jacCoupling[m.first][it.index()] += (*it);
687
688 for (auto it = jacCoupling[m.second].begin(); it != jacCoupling[m.second].end(); ++it)
689 (*it) = 0.0;
690 });
691 }
692 }
693 }
694 }
695
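696 //! the problem of each subdomain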
697 ProblemTuple problemTuple_;
698
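699 //! the finite volume grid geometry of each subdomain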
700 GridGeometryTuple gridGeometryTuple_;
701
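702 //! the variables container of each subdomain grid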
703 GridVariablesTuple gridVariablesTuple_;
704
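705 //! the time loop for instationary problem assembly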
706 std::shared_ptr<const TimeLoop> timeLoop_;
707
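708 //! an observing pointer to the previous solution (needed for instationary problems)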
709 const SolutionVector* prevSol_ = nullptr;
710
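711 //! whether the assembled problem is stationary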
712 bool isStationaryProblem_;
713
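714 //! shared pointers to the jacobian matrix and the residual vector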
715 std::shared_ptr<JacobianMatrix> jacobian_;
716 std::shared_ptr<ResidualType> residual_;
717
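718 //! whether the warning about the residual norm in parallel was already issued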
719 mutable bool warningIssued_;
720
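721 //! whether multithreaded assembly is enabled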
722 bool enableMultithreading_ = false;
723};
724
725} // end namespace Dumux
726
727#endif
A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa,...
Definition: multidomain/fvassembler.hh:84
std::size_t numDofs(Dune::index_constant< i > domainId) const
the number of dof locations of domain i
Definition: multidomain/fvassembler.hh:413
void updateGridVariables(const SolutionVector &curSol)
Updates the grid variables with the given solution.
Definition: multidomain/fvassembler.hh:394
void updateAfterGridAdaption()
Resizes jacobian and residual and recomputes colors.
Definition: multidomain/fvassembler.hh:384
typename MDTraits::SolutionVector SolutionVector
Definition: multidomain/fvassembler.hh:107
typename MDTraits::template SubDomain< id >::Problem Problem
Definition: multidomain/fvassembler.hh:104
JacobianMatrix & jacobian()
the full Jacobian matrix
Definition: multidomain/fvassembler.hh:446
static constexpr bool isImplicit()
Returns true if the assembler considers implicit assembly.
Definition: multidomain/fvassembler.hh:115
void assembleResidual(ResidualType &r, const SolutionVector &curSol)
assemble a residual r
Definition: multidomain/fvassembler.hh:254
typename MDTraits::JacobianMatrix JacobianMatrix
Definition: multidomain/fvassembler.hh:106
CMType CouplingManager
Definition: multidomain/fvassembler.hh:110
void assembleResidual(const SolutionVector &curSol)
compute the residuals using the internal residual
Definition: multidomain/fvassembler.hh:247
void resetTimeStep(const SolutionVector &curSol)
Resets the grid variables to the last time step.
Definition: multidomain/fvassembler.hh:404
Scalar residualNorm(const SolutionVector &curSol)
compute the residual and return its vector norm
Definition: multidomain/fvassembler.hh:324
const GridVariables< i > & gridVariables(Dune::index_constant< i > domainId) const
the grid variables of domain i
Definition: multidomain/fvassembler.hh:438
void setPreviousSolution(const SolutionVector &u)
Sets the solution from which to start the time integration. Has to be called prior to assembly for ti...
Definition: multidomain/fvassembler.hh:468
typename MDTraits::template SubDomain< id >::GridVariables GridVariables
Definition: multidomain/fvassembler.hh:98
const auto & gridGeometry(Dune::index_constant< i > domainId) const
the finite volume grid geometry of domain i
Definition: multidomain/fvassembler.hh:423
void setLinearSystem()
The version without arguments uses the default constructor to create the jacobian and residual object...
Definition: multidomain/fvassembler.hh:352
const auto & gridView(Dune::index_constant< i > domainId) const
the grid view of domain i
Definition: multidomain/fvassembler.hh:428
typename MDTraits::template SubDomain< id >::GridGeometry GridGeometry
Definition: multidomain/fvassembler.hh:101
const CouplingManager & couplingManager() const
the coupling manager
Definition: multidomain/fvassembler.hh:442
void setTimeManager(std::shared_ptr< const TimeLoop > timeLoop)
Set time loop for instationary problems.
Definition: multidomain/fvassembler.hh:461
const SolutionVector & prevSol() const
the solution of the previous time step
Definition: multidomain/fvassembler.hh:454
GridVariables< i > & gridVariables(Dune::index_constant< i > domainId)
the grid variables of domain i
Definition: multidomain/fvassembler.hh:433
std::shared_ptr< CouplingManager > couplingManager_
the coupling manager coupling the sub domains
Definition: multidomain/fvassembler.hh:486
MDTraits Traits
Definition: multidomain/fvassembler.hh:89
MultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager, std::shared_ptr< const TimeLoop > timeLoop, const SolutionVector &prevSol)
The constructor for instationary problems.
Definition: multidomain/fvassembler.hh:199
void setJacobianBuildMode(JacobianMatrix &jac) const
Sets the jacobian build mode.
Definition: multidomain/fvassembler.hh:365
GetPropType< SubDomainTypeTag< id >, Properties::LocalResidual > LocalResidual
TODO get rid of this GetPropType.
Definition: multidomain/fvassembler.hh:95
Scalar normOfResidual(ResidualType &residual) const
compute a residual's vector norm (this is a temporary interface introduced during the deprecation per...
Definition: multidomain/fvassembler.hh:273
LocalResidual< i > localResidual(Dune::index_constant< i > domainId) const
Create a local residual object (used by the local assembler)
Definition: multidomain/fvassembler.hh:481
const auto & problem(Dune::index_constant< i > domainId) const
the problem of domain i
Definition: multidomain/fvassembler.hh:418
typename MDTraits::ResidualVector ResidualType
Definition: multidomain/fvassembler.hh:108
MultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager)
The constructor for stationary problems.
Definition: multidomain/fvassembler.hh:171
void assembleJacobianAndResidual(const SolutionVector &curSol)
Assembles the global Jacobian of the residual and the residual for the current solution.
Definition: multidomain/fvassembler.hh:228
bool isStationaryProblem() const
Whether we are assembling a stationary or instationary problem.
Definition: multidomain/fvassembler.hh:474
typename MDTraits::Scalar Scalar
Definition: multidomain/fvassembler.hh:91
ResidualType & residual()
the full residual vector
Definition: multidomain/fvassembler.hh:450
void setLinearSystem(std::shared_ptr< JacobianMatrix > A, std::shared_ptr< ResidualType > r)
Tells the assembler which jacobian and residual to use. This also resizes the containers to the requi...
Definition: multidomain/fvassembler.hh:337
Definition: parallelhelpers.hh:476
The cell-centered scheme multidomain local assembler.
Definition: subdomaincclocalassembler.hh:270
Manages the handling of time dependent problems.
Definition: common/timeloop.hh:56
The default time loop for instationary simulations.
Definition: common/timeloop.hh:101
Defines all properties used in Dumux.
Manages the handling of time dependent problems.
Helper function to generate Jacobian pattern for multi domain models.
An enum class to define various differentiation methods available in order to compute the derivatives...
Some exceptions thrown in DuMux
dune-grid capabilities compatibility layer
constexpr bool isSerial()
Checking whether the backend is serial.
Definition: multithreading.hh:45
typename GetProp< TypeTag, Property >::type GetPropType
get the type alias defined in the property
Definition: propertysystem.hh:267
Helper function to generate Jacobian pattern for different discretization methods.
The available discretization methods in Dumux.
Multithreading in Dumux.
constexpr Box box
Definition: method.hh:147
constexpr FCStaggered fcstaggered
Definition: method.hh:151
bool allGridsSupportsMultithreadingImpl(const T &gridGeometries, std::index_sequence< I... >)
Definition: multidomain/fvassembler.hh:49
bool allGridsSupportsMultithreading(const std::tuple< GG... > &gridGeometries)
Definition: multidomain/fvassembler.hh:57
bool supportsMultithreading(const GridView &gridView)
Definition: gridcapabilities.hh:74
Definition: adapt.hh:17
typename Detail::ConcatSeq< decltype(std::make_index_sequence< e >{}), e+1, decltype(std::make_index_sequence<(n > e) ?(n - e - 1) :0 >{})>::type makeIncompleteIntegerSequence
Definition: utility.hh:59
Provides a helper class for nonoverlapping decomposition.
trait that is specialized for coupling manager supporting multithreaded assembly
Definition: multidomain/fvassembler.hh:71
A multidomain local assembler for Jacobian and residual contribution per element (cell-centered metho...
An assembler for Jacobian and residual contribution per element (CVFE methods) for multidomain proble...
An assembler for Jacobian and residual contribution per element (face-centered staggered methods) for...
A multidomain assembler for Jacobian and residual contribution per element (staggered method)
Utilities for template meta programming.