3.6-git
DUNE for Multi-{Phase, Component, Scale, Physics, ...} flow and transport in porous media
multidomain/fvassembler.hh
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*****************************************************************************
4 * See the file COPYING for full copying permissions. *
5 * *
6 * This program is free software: you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation, either version 3 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
18 *****************************************************************************/
26#ifndef DUMUX_MULTIDOMAIN_FV_ASSEMBLER_HH
27#define DUMUX_MULTIDOMAIN_FV_ASSEMBLER_HH
28
29#include <type_traits>
30#include <tuple>
31
32#include <dune/common/hybridutilities.hh>
33#include <dune/istl/matrixindexset.hh>
34
45
53
55
56namespace Dumux {
57
58namespace Grid::Capabilities {
59
60namespace Detail {
61// helper for multi-domain models
//! Fold over all sub-domain grid geometries and check that every
//! underlying grid view supports multithreaded iteration.
template<class T, std::size_t... I>
bool allGridsSupportsMultithreadingImpl(const T& gridGeometries, std::index_sequence<I...>)
{
    return (supportsMultithreading(std::get<I>(gridGeometries)->gridView()) && ...);
}
67} // end namespace Detail
68
69// helper for multi-domain models (all grids have to support multithreading)
70template<class... GG>
71bool allGridsSupportsMultithreading(const std::tuple<GG...>& gridGeometries)
72{
73 return Detail::allGridsSupportsMultithreadingImpl<std::tuple<GG...>>(gridGeometries, std::make_index_sequence<sizeof...(GG)>());
74}
75
76} // end namespace Grid::Capabilities
77
/*!
 * \brief Trait that is specialized for coupling managers supporting
 *        multithreaded assembly; the unspecialized default is false,
 *        i.e. serial assembly.
 */
template<class CM>
struct CouplingManagerSupportsMultithreadedAssembly : public std::false_type
{};
86
96template<class MDTraits, class CMType, DiffMethod diffMethod, bool useImplicitAssembly = true>
98{
99 template<std::size_t id>
100 using SubDomainTypeTag = typename MDTraits::template SubDomain<id>::TypeTag;
101
102public:
103 using Traits = MDTraits;
104
105 using Scalar = typename MDTraits::Scalar;
106
108 template<std::size_t id>
110
111 template<std::size_t id>
112 using GridVariables = typename MDTraits::template SubDomain<id>::GridVariables;
113
114 template<std::size_t id>
115 using GridGeometry = typename MDTraits::template SubDomain<id>::GridGeometry;
116
117 template<std::size_t id>
118 using Problem = typename MDTraits::template SubDomain<id>::Problem;
119
120 using JacobianMatrix = typename MDTraits::JacobianMatrix;
121 using SolutionVector = typename MDTraits::SolutionVector;
123
124 using CouplingManager = CMType;
125
    /*!
     * \brief Returns true if the assembler considers implicit assembly
     *        (compile-time constant taken from the class template parameter)
     */
    static constexpr bool isImplicit()
    { return useImplicitAssembly; }
131
132private:
133
134 using ProblemTuple = typename MDTraits::template TupleOfSharedPtrConst<Problem>;
135 using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst<GridGeometry>;
136 using GridVariablesTuple = typename MDTraits::template TupleOfSharedPtr<GridVariables>;
137
139 using ThisType = MultiDomainFVAssembler<MDTraits, CouplingManager, diffMethod, isImplicit()>;
140
    //! Maps the discretization method of sub-domain id to the matching
    //! sub-domain local assembler type (specialized per method below)
    template<class DiscretizationMethod, std::size_t id>
    struct SubDomainAssemblerType;

    //! cell-centered TPFA uses the cell-centered local assembler
    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::CCTpfa, id>
    {
        using type = SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    //! cell-centered MPFA shares the cell-centered local assembler
    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::CCMpfa, id>
    {
        using type = SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::Box, id>
    {
        using type = SubDomainBoxLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::Staggered, id>
    {
        using type = SubDomainStaggeredLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::FCStaggered, id>
    {
        using type = SubDomainFaceCenteredLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::FCDiamond, id>
    {
        using type = SubDomainFaceCenteredDiamondLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    template<std::size_t id>
    struct SubDomainAssemblerType<DiscretizationMethods::PQ1Bubble, id>
    {
        using type = SubDomainPQ1BubbleLocalAssembler<id, SubDomainTypeTag<id>, ThisType, diffMethod, isImplicit()>;
    };

    //! the local assembler for sub-domain id, selected by its discretization method
    template<std::size_t id>
    using SubDomainAssembler = typename SubDomainAssemblerType<typename GridGeometry<id>::DiscretizationMethod, id>::type;
188
189public:
190
191
198 GridGeometryTuple gridGeometry,
199 GridVariablesTuple gridVariables,
200 std::shared_ptr<CouplingManager> couplingManager)
202 , problemTuple_(std::move(problem))
203 , gridGeometryTuple_(std::move(gridGeometry))
204 , gridVariablesTuple_(std::move(gridVariables))
205 , timeLoop_()
206 , isStationaryProblem_(true)
207 , warningIssued_(false)
208 {
209 static_assert(isImplicit(), "Explicit assembler for stationary problem doesn't make sense!");
210 std::cout << "Instantiated assembler for a stationary problem." << std::endl;
211
215 && getParam<bool>("Assembly.Multithreading", true);
216
217 maybeComputeColors_();
218 }
219
226 GridGeometryTuple gridGeometry,
227 GridVariablesTuple gridVariables,
228 std::shared_ptr<CouplingManager> couplingManager,
229 std::shared_ptr<const TimeLoop> timeLoop,
230 const SolutionVector& prevSol)
232 , problemTuple_(std::move(problem))
233 , gridGeometryTuple_(std::move(gridGeometry))
234 , gridVariablesTuple_(std::move(gridVariables))
235 , timeLoop_(timeLoop)
236 , prevSol_(&prevSol)
237 , isStationaryProblem_(false)
238 , warningIssued_(false)
239 {
240 std::cout << "Instantiated assembler for an instationary problem." << std::endl;
241
245 && getParam<bool>("Assembly.Multithreading", true);
246
247 maybeComputeColors_();
248 }
249
255 {
256 checkAssemblerState_();
257 resetJacobian_();
258 resetResidual_();
259
260 using namespace Dune::Hybrid;
261 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
262 {
263 auto& jacRow = (*jacobian_)[domainId];
264 auto& subRes = (*residual_)[domainId];
265 this->assembleJacobianAndResidual_(domainId, jacRow, subRes, curSol);
266
267 const auto gridGeometry = std::get<domainId>(gridGeometryTuple_);
268 enforcePeriodicConstraints_(domainId, jacRow, subRes, *gridGeometry, curSol[domainId]);
269 });
270 }
271
274 {
275 resetResidual_();
276 assembleResidual(*residual_, curSol);
277 }
278
281 {
282 r = 0.0;
283
284 checkAssemblerState_();
285
286 // update the grid variables for the case of active caching
287 updateGridVariables(curSol);
288
289 using namespace Dune::Hybrid;
290 forEach(integralRange(Dune::Hybrid::size(r)), [&](const auto domainId)
291 {
292 auto& subRes = r[domainId];
293 this->assembleResidual_(domainId, subRes, curSol);
294 });
295 }
296
299 {
301 setResidualSize_(residual);
302 assembleResidual(residual, curSol);
303
304 // calculate the squared norm of the residual
305 Scalar resultSquared = 0.0;
306
307 // for box communicate the residual with the neighboring processes
308 using namespace Dune::Hybrid;
309 forEach(integralRange(Dune::Hybrid::size(residual)), [&](const auto domainId)
310 {
311 const auto gridGeometry = std::get<domainId>(gridGeometryTuple_);
312 const auto& gridView = gridGeometry->gridView();
313
314 if (gridView.comm().size() > 1 && gridView.overlapSize(0) == 0)
315 {
317 {
318 using GV = typename GridGeometry<domainId>::GridView;
319 using DM = typename GridGeometry<domainId>::VertexMapper;
321
322 PVHelper vectorHelper(gridView, gridGeometry->vertexMapper());
323
324 vectorHelper.makeNonOverlappingConsistent(residual[domainId]);
325 }
326 }
327 else if (!warningIssued_)
328 {
329 if (gridView.comm().size() > 1 && gridView.comm().rank() == 0)
330 std::cout << "\nWarning: norm calculation adds entries corresponding to\n"
331 << "overlapping entities multiple times. Please use the norm\n"
332 << "function provided by a linear solver instead." << std::endl;
333
334 warningIssued_ = true;
335 }
336
337 Scalar localNormSquared = residual[domainId].two_norm2();
338
339 if (gridView.comm().size() > 1)
340 {
341 localNormSquared = gridView.comm().sum(localNormSquared);
342 }
343
344 resultSquared += localNormSquared;
345 });
346
347 using std::sqrt;
348 return sqrt(resultSquared);
349 }
350
    /*!
     * \brief Tells the assembler which Jacobian and residual to use.
     *        This also resizes the containers to the required sizes and sets
     *        the sparsity pattern of the Jacobian matrix.
     * \param A shared pointer to the externally owned Jacobian storage
     * \param r shared pointer to the externally owned residual storage
     */
    void setLinearSystem(std::shared_ptr<JacobianMatrix> A,
                         std::shared_ptr<SolutionVector> r)
    {
        jacobian_ = A;
        residual_ = r;

        // a BCRS matrix needs build mode and sparsity pattern set
        // before any coefficient can be written
        setJacobianBuildMode(*jacobian_);
        setJacobianPattern_(*jacobian_);
        setResidualSize_(*residual_);
    }
366
372 {
373 jacobian_ = std::make_shared<JacobianMatrix>();
374 residual_ = std::make_shared<SolutionVector>();
375
376 setJacobianBuildMode(*jacobian_);
377 setJacobianPattern_(*jacobian_);
378 setResidualSize_(*residual_);
379 }
380
385 {
386 using namespace Dune::Hybrid;
387 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto i)
388 {
389 forEach(jac[i], [&](auto& jacBlock)
390 {
391 using BlockType = std::decay_t<decltype(jacBlock)>;
392 if (jacBlock.buildMode() == BlockType::BuildMode::unknown)
393 jacBlock.setBuildMode(BlockType::BuildMode::random);
394 else if (jacBlock.buildMode() != BlockType::BuildMode::random)
395 DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
396 });
397 });
398 }
399
404 {
405 setResidualSize_();
406 setJacobianPattern_();
407 maybeComputeColors_();
408 }
409
414 {
415 using namespace Dune::Hybrid;
416 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
417 { this->gridVariables(domainId).update(curSol[domainId]); });
418 }
419
    /*!
     * \brief Resets the grid variables of all sub-domains to the last time step
     * \param curSol the solution to reset the grid variables with
     */
    void resetTimeStep(const SolutionVector& curSol)
    {
        using namespace Dune::Hybrid;
        forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
        { this->gridVariables(domainId).resetTimeStep(curSol[domainId]); });
    }
429
    //! the number of dof locations of domain i
    template<std::size_t i>
    std::size_t numDofs(Dune::index_constant<i> domainId) const
    { return std::get<domainId>(gridGeometryTuple_)->numDofs(); }

    //! the problem of domain i
    template<std::size_t i>
    const auto& problem(Dune::index_constant<i> domainId) const
    { return *std::get<domainId>(problemTuple_); }

    //! the finite volume grid geometry of domain i
    template<std::size_t i>
    const auto& gridGeometry(Dune::index_constant<i> domainId) const
    { return *std::get<domainId>(gridGeometryTuple_); }

    //! the grid view of domain i
    template<std::size_t i>
    const auto& gridView(Dune::index_constant<i> domainId) const
    { return gridGeometry(domainId).gridView(); }

    //! the grid variables of domain i (mutable access)
    template<std::size_t i>
    GridVariables<i>& gridVariables(Dune::index_constant<i> domainId)
    { return *std::get<domainId>(gridVariablesTuple_); }

    //! the grid variables of domain i (read-only access)
    template<std::size_t i>
    const GridVariables<i>& gridVariables(Dune::index_constant<i> domainId) const
    { return *std::get<domainId>(gridVariablesTuple_); }
459
462 { return *couplingManager_; }
463
466 { return *jacobian_; }
467
470 { return *residual_; }
471
    //! the solution of the previous time step
    const SolutionVector& prevSol() const
    { return *prevSol_; }

    /*!
     * \brief Set time loop for instationary problems
     * \note a non-null time loop makes the problem instationary,
     *       a null pointer makes it stationary again
     */
    void setTimeManager(std::shared_ptr<const TimeLoop> timeLoop)
    { timeLoop_ = timeLoop; isStationaryProblem_ = !(static_cast<bool>(timeLoop)); }
482
488 { prevSol_ = &u; }
489
494 { return isStationaryProblem_; }
495
    /*!
     * \brief Create a local residual object (used by the local assembler)
     * \note hands out raw pointers to the problem and time loop; both are
     *       held via shared_ptr by this assembler and outlive the residual
     */
    template<std::size_t i>
    LocalResidual<i> localResidual(Dune::index_constant<i> domainId) const
    { return LocalResidual<i>(std::get<domainId>(problemTuple_).get(), timeLoop_.get()); }
502
503protected:
    //! the coupling manager coupling the sub domains
    std::shared_ptr<CouplingManager> couplingManager_;
506
507private:
    /*!
     * \brief Sets the sparsity pattern of every block (diagonal and coupling)
     *        of the multi-type Jacobian matrix
     * \param jac the Jacobian matrix whose blocks get their pattern exported
     */
    void setJacobianPattern_(JacobianMatrix& jac) const
    {
        using namespace Dune::Hybrid;
        forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainI)
        {
            forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ)
            {
                // diagonal vs coupling block is selected by overload resolution below
                const auto pattern = this->getJacobianPattern_(domainI, domainJ);
                pattern.exportIdx(jac[domainI][domainJ]);
            });
        });
    }
523
    /*!
     * \brief Resizes each sub-residual to the number of dofs of its domain
     * \param res the multi-type residual vector to resize
     */
    void setResidualSize_(SolutionVector& res) const
    {
        using namespace Dune::Hybrid;
        forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId)
        { res[domainId].resize(this->numDofs(domainId)); });
    }
533
534 // reset the residual vector to 0.0
535 void resetResidual_()
536 {
537 if(!residual_)
538 {
539 residual_ = std::make_shared<SolutionVector>();
540 setResidualSize_(*residual_);
541 }
542
543 (*residual_) = 0.0;
544 }
545
546 // reset the jacobian vector to 0.0
547 void resetJacobian_()
548 {
549 if(!jacobian_)
550 {
551 jacobian_ = std::make_shared<JacobianMatrix>();
552 setJacobianBuildMode(*jacobian_);
553 setJacobianPattern_(*jacobian_);
554 }
555
556 (*jacobian_) = 0.0;
557 }
558
560 void maybeComputeColors_()
561 {
562 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
563 if (enableMultithreading_)
564 couplingManager_->computeColorsForAssembly();
565 }
566
567 // check if the assembler is in a correct state for assembly
568 void checkAssemblerState_() const
569 {
570 if (!isStationaryProblem_ && !prevSol_)
571 DUNE_THROW(Dune::InvalidStateException, "Assembling instationary problem but previous solution was not set!");
572
573 if (isStationaryProblem_ && prevSol_)
574 DUNE_THROW(Dune::InvalidStateException, "Assembling stationary problem but a previous solution was set."
575 << " Did you forget to set the timeLoop to make this problem instationary?");
576 }
577
    /*!
     * \brief Assembles the Jacobian row and sub-residual of sub-domain i
     *        by visiting every element of that domain
     */
    template<std::size_t i, class JacRow, class SubRes>
    void assembleJacobianAndResidual_(Dune::index_constant<i> domainId, JacRow& jacRow, SubRes& subRes,
                                      const SolutionVector& curSol)
    {
        assemble_(domainId, [&](const auto& element)
        {
            // one local assembler per element; it holds the element-local context
            SubDomainAssembler<i> subDomainAssembler(*this, element, curSol, *couplingManager_);
            subDomainAssembler.assembleJacobianAndResidual(jacRow, subRes, gridVariablesTuple_);
        });
    }
588
    /*!
     * \brief Assembles the sub-residual of sub-domain i
     *        by visiting every element of that domain
     */
    template<std::size_t i, class SubRes>
    void assembleResidual_(Dune::index_constant<i> domainId, SubRes& subRes,
                           const SolutionVector& curSol)
    {
        assemble_(domainId, [&](const auto& element)
        {
            SubDomainAssembler<i> subDomainAssembler(*this, element, curSol, *couplingManager_);
            subDomainAssembler.assembleResidual(subRes);
        });
    }
599
    /*!
     * \brief Applies an element-local assembly function to every element of
     *        sub-domain i, possibly multithreaded, and synchronizes failure
     *        across MPI processes so every rank throws consistently
     * \param domainId the sub-domain index
     * \param assembleElement callable invoked once per element
     */
    template<std::size_t i, class AssembleElementFunc>
    void assemble_(Dune::index_constant<i> domainId, AssembleElementFunc&& assembleElement) const
    {
        // a state that will be checked on all processes
        bool succeeded = false;

        // try assembling using the local assembly function
        try
        {
            if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
            {
                if (enableMultithreading_)
                {
                    couplingManager_->assembleMultithreaded(
                        domainId, std::forward<AssembleElementFunc>(assembleElement)
                    );
                    // NOTE: the multithreaded path returns before the parallel
                    // success check below — errors are handled by the coupling manager
                    return;
                }
            }

            // fallback for coupling managers that don't support multithreaded assembly (yet)
            // or if multithreaded assembly is disabled
            // let the local assembler add the element contributions
            for (const auto& element : elements(gridView(domainId)))
                assembleElement(element);

            // if we get here, everything worked well on this process
            succeeded = true;
        }
        // throw exception if a problem occurred
        catch (NumericalProblem &e)
        {
            std::cout << "rank " << gridView(domainId).comm().rank()
                      << " caught an exception while assembling:" << e.what()
                      << "\n";
            succeeded = false;
        }

        // make sure everything worked well on all processes
        if (gridView(domainId).comm().size() > 1)
            succeeded = gridView(domainId).comm().min(succeeded);

        // if not succeeded rethrow the error on all processes
        if (!succeeded)
            DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
    }
651
652 // get diagonal block pattern
    // get diagonal block pattern (i == j): the sub-domain's own pattern,
    // possibly extended with extra dof dependencies by the coupling manager
    template<std::size_t i, std::size_t j, typename std::enable_if_t<(i==j), int> = 0>
    Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
                                             Dune::index_constant<j> domainJ) const
    {
        const auto& gg = gridGeometry(domainI);
        auto pattern = getJacobianPattern<isImplicit()>(gg);
        couplingManager_->extendJacobianPattern(domainI, pattern);
        return pattern;
    }
662
663 // get coupling block pattern
    // get coupling block pattern (i != j): determined by the coupling manager's
    // stencils between the two sub-domains
    template<std::size_t i, std::size_t j, typename std::enable_if_t<(i!=j), int> = 0>
    Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
                                             Dune::index_constant<j> domainJ) const
    {
        return getCouplingJacobianPattern<isImplicit()>(*couplingManager_,
                                                        domainI, gridGeometry(domainI),
                                                        domainJ, gridGeometry(domainJ));
    }
672
    // build periodic constraints into the system matrix:
    // for each periodic dof pair the second dof's equation is replaced by the
    // constraint u[second] - u[first] = 0, and its original row (residual and
    // all matrix blocks) is accumulated onto the first dof's row
    template<std::size_t i, class JacRow, class Sol, class GG>
    void enforcePeriodicConstraints_(Dune::index_constant<i> domainI, JacRow& jacRow, Sol& res, const GG& gridGeometry, const Sol& curSol)
    {
        // periodic vertex maps exist only for box and face-centered staggered schemes
        if constexpr (GG::discMethod == DiscretizationMethods::box || GG::discMethod == DiscretizationMethods::fcstaggered)
        {
            for (const auto& m : gridGeometry.periodicVertexMap())
            {
                // handle each pair once (the map contains both orderings)
                if (m.first < m.second)
                {
                    auto& jac = jacRow[domainI];

                    // add the second row to the first
                    res[m.first] += res[m.second];

                    // enforce the solution of the first periodic DOF to the second one
                    res[m.second] = curSol[m.second] - curSol[m.first];

                    // accumulate the second matrix row onto the first, then
                    // overwrite the second row with the constraint equation
                    const auto end = jac[m.second].end();
                    for (auto it = jac[m.second].begin(); it != end; ++it)
                        jac[m.first][it.index()] += (*it);

                    // enforce constraint in second row
                    for (auto it = jac[m.second].begin(); it != end; ++it)
                        (*it) = it.index() == m.second ? 1.0 : it.index() == m.first ? -1.0 : 0.0;

                    // do the same for every coupling block of this matrix row
                    using namespace Dune::Hybrid;
                    forEach(makeIncompleteIntegerSequence<JacRow::size(), domainI>(), [&](const auto couplingDomainId)
                    {
                        auto& jacCoupling = jacRow[couplingDomainId];

                        for (auto it = jacCoupling[m.second].begin(); it != jacCoupling[m.second].end(); ++it)
                            jacCoupling[m.first][it.index()] += (*it);

                        // the constrained equation has no coupling entries
                        for (auto it = jacCoupling[m.second].begin(); it != jacCoupling[m.second].end(); ++it)
                            (*it) = 0.0;
                    });
                }
            }
        }
    }
714
    //! pointers to the problems of the sub-domains
    ProblemTuple problemTuple_;

    //! pointers to the finite volume grid geometries of the sub-domains
    GridGeometryTuple gridGeometryTuple_;

    //! pointers to the grid variables of the sub-domains
    GridVariablesTuple gridVariablesTuple_;

    //! the time loop for instationary problems (empty for stationary problems)
    std::shared_ptr<const TimeLoop> timeLoop_;

    //! non-owning pointer to the previous solution (only set for instationary problems)
    const SolutionVector* prevSol_ = nullptr;

    //! whether this assembler assembles a stationary problem
    bool isStationaryProblem_;

    //! shared pointers to the Jacobian matrix and residual storage
    std::shared_ptr<JacobianMatrix> jacobian_;
    std::shared_ptr<SolutionVector> residual_;

    //! whether the parallel residual-norm warning has already been printed
    bool warningIssued_;

    //! whether multithreaded assembly is enabled
    bool enableMultithreading_ = false;
742};
743
744} // end namespace Dumux
745
746#endif
An enum class to define various differentiation methods available in order to compute the derivatives...
Helper function to generate Jacobian pattern for different discretization methods.
Utilities for template meta programming.
Some exceptions thrown in DuMux
dune-grid capabilities compatibility layer
The available discretization methods in Dumux.
Provides a helper class for nonoverlapping decomposition.
Helper function to generate Jacobian pattern for multi domain models.
An assembler for Jacobian and residual contribution per element (box methods) for multidomain problems.
A multidomain local assembler for Jacobian and residual contribution per element (cell-centered methods).
An assembler for Jacobian and residual contribution per element (face-centered diamond methods) for multidomain problems.
An assembler for Jacobian and residual contribution per element (face-centered staggered methods) for multidomain problems.
An assembler for Jacobian and residual contribution per element for multidomain problems.
A multidomain assembler for Jacobian and residual contribution per element (staggered method).
Multithreading in Dumux.
constexpr bool isSerial()
Checking whether the backend is serial.
Definition: multithreading.hh:57
Adaption of the non-isothermal two-phase two-component flow model to problems with CO2.
Definition: adapt.hh:29
typename Detail::ConcatSeq< decltype(std::make_index_sequence< e >{}), e+1, decltype(std::make_index_sequence<(n > e) ?(n - e - 1) :0 >{})>::type makeIncompleteIntegerSequence
Definition: utility.hh:71
typename GetProp< TypeTag, Property >::type GetPropType
get the type alias defined in the property
Definition: propertysystem.hh:180
typename BlockTypeHelper< SolutionVector, Dune::IsNumber< SolutionVector >::value >::type BlockType
Definition: nonlinear/newtonsolver.hh:198
bool allGridsSupportsMultithreading(const std::tuple< GG... > &gridGeometries)
Definition: multidomain/fvassembler.hh:71
bool supportsMultithreading(const GridView &gridView)
Definition: gridcapabilities.hh:86
CVFE< CVFEMethods::CR_RT > FCDiamond
Definition: method.hh:90
constexpr Box box
Definition: method.hh:136
CVFE< CVFEMethods::PQ1 > Box
Definition: method.hh:83
CVFE< CVFEMethods::PQ1Bubble > PQ1Bubble
Definition: method.hh:97
constexpr FCStaggered fcstaggered
Definition: method.hh:140
bool allGridsSupportsMultithreadingImpl(const T &gridGeometries, std::index_sequence< I... >)
Definition: multidomain/fvassembler.hh:63
Definition: common/properties.hh:72
Manages the handling of time dependent problems.
Definition: common/timeloop.hh:68
The default time loop for instationary simulations.
Definition: common/timeloop.hh:113
Definition: parallelhelpers.hh:473
trait that is specialized for coupling manager supporting multithreaded assembly
Definition: multidomain/fvassembler.hh:85
A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa,...
Definition: multidomain/fvassembler.hh:98
std::size_t numDofs(Dune::index_constant< i > domainId) const
the number of dof locations of domain i
Definition: multidomain/fvassembler.hh:432
void updateGridVariables(const SolutionVector &curSol)
Updates the grid variables with the given solution.
Definition: multidomain/fvassembler.hh:413
void updateAfterGridAdaption()
Resizes jacobian and residual and recomputes colors.
Definition: multidomain/fvassembler.hh:403
typename MDTraits::SolutionVector SolutionVector
Definition: multidomain/fvassembler.hh:121
SolutionVector ResidualType
Definition: multidomain/fvassembler.hh:122
typename MDTraits::template SubDomain< id >::Problem Problem
Definition: multidomain/fvassembler.hh:118
JacobianMatrix & jacobian()
the full Jacobian matrix
Definition: multidomain/fvassembler.hh:465
static constexpr bool isImplicit()
Returns true if the assembler considers implicit assembly.
Definition: multidomain/fvassembler.hh:129
void assembleResidual(ResidualType &r, const SolutionVector &curSol)
assemble a residual r
Definition: multidomain/fvassembler.hh:280
typename MDTraits::JacobianMatrix JacobianMatrix
Definition: multidomain/fvassembler.hh:120
CMType CouplingManager
Definition: multidomain/fvassembler.hh:124
void assembleResidual(const SolutionVector &curSol)
compute the residuals using the internal residual
Definition: multidomain/fvassembler.hh:273
void resetTimeStep(const SolutionVector &curSol)
Resets the grid variables to the last time step.
Definition: multidomain/fvassembler.hh:423
Scalar residualNorm(const SolutionVector &curSol)
compute the residual and return it's vector norm
Definition: multidomain/fvassembler.hh:298
const GridVariables< i > & gridVariables(Dune::index_constant< i > domainId) const
the grid variables of domain i
Definition: multidomain/fvassembler.hh:457
void setPreviousSolution(const SolutionVector &u)
Sets the solution from which to start the time integration. Has to be called prior to assembly for ti...
Definition: multidomain/fvassembler.hh:487
typename MDTraits::template SubDomain< id >::GridVariables GridVariables
Definition: multidomain/fvassembler.hh:112
void setLinearSystem(std::shared_ptr< JacobianMatrix > A, std::shared_ptr< SolutionVector > r)
Tells the assembler which jacobian and residual to use. This also resizes the containers to the requi...
Definition: multidomain/fvassembler.hh:356
const auto & gridGeometry(Dune::index_constant< i > domainId) const
the finite volume grid geometry of domain i
Definition: multidomain/fvassembler.hh:442
void setLinearSystem()
The version without arguments uses the default constructor to create the jacobian and residual object...
Definition: multidomain/fvassembler.hh:371
const auto & gridView(Dune::index_constant< i > domainId) const
the grid view of domain i
Definition: multidomain/fvassembler.hh:447
typename MDTraits::template SubDomain< id >::GridGeometry GridGeometry
Definition: multidomain/fvassembler.hh:115
const CouplingManager & couplingManager() const
the coupling manager
Definition: multidomain/fvassembler.hh:461
void setTimeManager(std::shared_ptr< const TimeLoop > timeLoop)
Set time loop for instationary problems.
Definition: multidomain/fvassembler.hh:480
const SolutionVector & prevSol() const
the solution of the previous time step
Definition: multidomain/fvassembler.hh:473
GridVariables< i > & gridVariables(Dune::index_constant< i > domainId)
the grid variables of domain i
Definition: multidomain/fvassembler.hh:452
std::shared_ptr< CouplingManager > couplingManager_
the coupling manager coupling the sub domains
Definition: multidomain/fvassembler.hh:505
MDTraits Traits
Definition: multidomain/fvassembler.hh:103
MultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager, std::shared_ptr< const TimeLoop > timeLoop, const SolutionVector &prevSol)
The constructor for instationary problems.
Definition: multidomain/fvassembler.hh:225
void setJacobianBuildMode(JacobianMatrix &jac) const
Sets the jacobian build mode.
Definition: multidomain/fvassembler.hh:384
GetPropType< SubDomainTypeTag< id >, Properties::LocalResidual > LocalResidual
TODO get rid of this GetPropType.
Definition: multidomain/fvassembler.hh:109
LocalResidual< i > localResidual(Dune::index_constant< i > domainId) const
Create a local residual object (used by the local assembler)
Definition: multidomain/fvassembler.hh:500
const auto & problem(Dune::index_constant< i > domainId) const
the problem of domain i
Definition: multidomain/fvassembler.hh:437
MultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager)
The constructor for stationary problems.
Definition: multidomain/fvassembler.hh:197
void assembleJacobianAndResidual(const SolutionVector &curSol)
Assembles the global Jacobian of the residual and the residual for the current solution.
Definition: multidomain/fvassembler.hh:254
bool isStationaryProblem() const
Whether we are assembling a stationary or instationary problem.
Definition: multidomain/fvassembler.hh:493
typename MDTraits::Scalar Scalar
Definition: multidomain/fvassembler.hh:105
SolutionVector & residual()
the full residual vector
Definition: multidomain/fvassembler.hh:469
The cell-centered scheme multidomain local assembler.
Definition: subdomaincclocalassembler.hh:282
Declares all properties used in Dumux.
Manages the handling of time dependent problems.