multistagemultidomainfvassembler.hh
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3//
4// SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
5// SPDX-License-Identifier: GPL-3.0-or-later
6//
15#ifndef DUMUX_EXPERIMENTAL_MULTISTAGE_MULTIDOMAIN_FV_ASSEMBLER_HH
16#define DUMUX_EXPERIMENTAL_MULTISTAGE_MULTIDOMAIN_FV_ASSEMBLER_HH
17
18#include <vector>
19#include <type_traits>
20#include <tuple>
21#include <memory>
22
23#include <dune/common/hybridutilities.hh>
24#include <dune/istl/matrixindexset.hh>
25
30
36
39
43
46
48namespace Dumux::Grid::Capabilities {
49namespace Detail {
50// helper for multi-domain models
51template<class T, std::size_t... I>
52bool allGridsSupportsMultithreadingImpl(const T& gridGeometries, std::index_sequence<I...>)
53{
54 return (... && supportsMultithreading(std::get<I>(gridGeometries)->gridView()));
55}
56} // end namespace Detail
57
58// helper for multi-domain models (all grids have to support multithreading)
59template<class... GG>
60bool allGridsSupportsMultithreading(const std::tuple<GG...>& gridGeometries)
61{
62 return Detail::allGridsSupportsMultithreadingImpl<std::tuple<GG...>>(gridGeometries, std::make_index_sequence<sizeof...(GG)>());
63}
64
65} // end namespace Dumux::Grid::Capabilities
66
67namespace Dumux {
68
76template<class CM>
77struct CouplingManagerSupportsMultithreadedAssembly : public std::false_type
78{};
79
80} // end namespace Dumux
81
82namespace Dumux::Experimental {
83
93template<class MDTraits, class CMType, DiffMethod diffMethod>
94class MultiStageMultiDomainFVAssembler
95{
96 template<std::size_t id>
97 using SubDomainTypeTag = typename MDTraits::template SubDomain<id>::TypeTag;
98
99public:
100 using Traits = MDTraits;
101
102 using Scalar = typename MDTraits::Scalar;
103 using StageParams = MultiStageParams<Scalar>;
104
106 template<std::size_t id>
107 using LocalResidual = GetPropType<SubDomainTypeTag<id>, Properties::LocalResidual>;
108
109 template<std::size_t id>
110 using GridVariables = typename MDTraits::template SubDomain<id>::GridVariables;
111
112 template<std::size_t id>
113 using GridGeometry = typename MDTraits::template SubDomain<id>::GridGeometry;
114
115 template<std::size_t id>
116 using Problem = typename MDTraits::template SubDomain<id>::Problem;
117
118 using JacobianMatrix = typename MDTraits::JacobianMatrix;
119 using SolutionVector = typename MDTraits::SolutionVector;
120 using ResidualType = typename MDTraits::ResidualVector;
121
122 using CouplingManager = CMType;
123
124private:
125
126 using ProblemTuple = typename MDTraits::template TupleOfSharedPtrConst<Problem>;
127 using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst<GridGeometry>;
128 using GridVariablesTuple = typename MDTraits::template TupleOfSharedPtr<GridVariables>;
129
130 using ThisType = MultiStageMultiDomainFVAssembler<MDTraits, CMType, diffMethod>;
131
132 template<std::size_t id>
133 using SubDomainAssemblerView = MultiDomainAssemblerSubDomainView<ThisType, id>;
134
135 template<class DiscretizationMethod, std::size_t id>
136 struct SubDomainAssemblerType;
137
138 template<std::size_t id>
139 struct SubDomainAssemblerType<DiscretizationMethods::CCTpfa, id>
140 {
141 using type = Experimental::SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, SubDomainAssemblerView<id>, diffMethod>;
142 };
143
144 template<std::size_t id>
145 struct SubDomainAssemblerType<DiscretizationMethods::CCMpfa, id>
146 {
147 using type = Experimental::SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, SubDomainAssemblerView<id>, diffMethod>;
148 };
149
150 template<std::size_t id, class DM>
151 struct SubDomainAssemblerType<DiscretizationMethods::CVFE<DM>, id>
152 {
153 using type = Experimental::SubDomainCVFELocalAssembler<id, SubDomainTypeTag<id>, SubDomainAssemblerView<id>, diffMethod>;
154 };
155
156 template<std::size_t id>
157 using SubDomainAssembler = typename SubDomainAssemblerType<typename GridGeometry<id>::DiscretizationMethod, id>::type;
158
159public:
165 MultiStageMultiDomainFVAssembler(ProblemTuple problem,
166 GridGeometryTuple gridGeometry,
167 GridVariablesTuple gridVariables,
168 std::shared_ptr<CouplingManager> couplingManager,
169 std::shared_ptr<const Experimental::MultiStageMethod<Scalar>> msMethod,
170 const SolutionVector& prevSol)
171 : couplingManager_(couplingManager)
172 , timeSteppingMethod_(msMethod)
173 , problemTuple_(std::move(problem))
174 , gridGeometryTuple_(std::move(gridGeometry))
175 , gridVariablesTuple_(std::move(gridVariables))
176 , prevSol_(&prevSol)
177 {
178 std::cout << "Instantiated assembler for an instationary problem." << std::endl;
179
180 enableMultithreading_ = CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value
181 && Grid::Capabilities::allGridsSupportsMultithreading(gridGeometryTuple_)
182 && !Multithreading::isSerial()
183 && getParam<bool>("Assembly.Multithreading", true);
184
185 maybeComputeColors_();
186 }
187
192 void assembleJacobianAndResidual(const SolutionVector& curSol)
193 {
194 resetJacobian_();
195
196 resetResidual_();
197 spatialOperatorEvaluations_.back() = 0.0;
198 temporalOperatorEvaluations_.back() = 0.0;
199
200 if (stageParams_->size() != spatialOperatorEvaluations_.size())
201 DUNE_THROW(Dune::InvalidStateException, "Wrong number of residuals");
202
203 using namespace Dune::Hybrid;
204 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
205 {
206 // assemble the spatial and temporal residual of the current time step and the Jacobian
207 // w.r.t. the current solution (the solution at the current stage)
208 auto& jacRow = (*jacobian_)[domainId];
209 auto& spatial = spatialOperatorEvaluations_.back()[domainId];
210 auto& temporal = temporalOperatorEvaluations_.back()[domainId];
211
212 assemble_(domainId, [&](const auto& element)
213 {
214 MultiDomainAssemblerSubDomainView view{*this, domainId};
215 SubDomainAssembler<domainId()> subDomainAssembler(view, element, curSol, *couplingManager_);
216 subDomainAssembler.assembleJacobianAndResidual(
217 jacRow, (*residual_)[domainId],
218 gridVariablesTuple_,
219 *stageParams_, temporal, spatial,
220 constrainedDofs_[domainId]
221 );
222 });
223
224 // assemble the full residual for the time integration stage
225 auto constantResidualComponent = (*residual_)[domainId];
226 constantResidualComponent = 0.0;
227 for (std::size_t k = 0; k < stageParams_->size()-1; ++k)
228 {
229 if (!stageParams_->skipTemporal(k))
230 constantResidualComponent.axpy(stageParams_->temporalWeight(k), temporalOperatorEvaluations_[k][domainId]);
231 if (!stageParams_->skipSpatial(k))
232 constantResidualComponent.axpy(stageParams_->spatialWeight(k), spatialOperatorEvaluations_[k][domainId]);
233 }
234
235 // masked summation of the constant residual component onto this stage's residual component
236 for (std::size_t i = 0; i < constantResidualComponent.size(); ++i)
237 for (std::size_t ii = 0; ii < constantResidualComponent[i].size(); ++ii)
238 (*residual_)[domainId][i][ii] += constrainedDofs_[domainId][i][ii] > 0.5 ? 0.0 : constantResidualComponent[i][ii];
239 });
240 }
241
243 void assembleResidual(const SolutionVector& curSol)
244 { DUNE_THROW(Dune::NotImplemented, "residual"); }
245
250 void setLinearSystem()
251 {
252 jacobian_ = std::make_shared<JacobianMatrix>();
253 residual_ = std::make_shared<ResidualType>();
254
255 setJacobianBuildMode_(*jacobian_);
256 setJacobianPattern_(*jacobian_);
257 setResidualSize_(*residual_);
258 }
259
263 void updateGridVariables(const SolutionVector& curSol)
264 {
265 using namespace Dune::Hybrid;
266 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
267 { this->gridVariables(domainId).update(curSol[domainId]); });
268 }
269
273 void resetTimeStep(const SolutionVector& curSol)
274 {
275 using namespace Dune::Hybrid;
276 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
277 { this->gridVariables(domainId).resetTimeStep(curSol[domainId]); });
278 }
279
281 template<std::size_t i>
282 std::size_t numDofs(Dune::index_constant<i> domainId) const
283 { return std::get<domainId>(gridGeometryTuple_)->numDofs(); }
284
286 template<std::size_t i>
287 const auto& problem(Dune::index_constant<i> domainId) const
288 { return *std::get<domainId>(problemTuple_); }
289
291 template<std::size_t i>
292 const auto& gridGeometry(Dune::index_constant<i> domainId) const
293 { return *std::get<domainId>(gridGeometryTuple_); }
294
296 template<std::size_t i>
297 const auto& gridView(Dune::index_constant<i> domainId) const
298 { return gridGeometry(domainId).gridView(); }
299
301 template<std::size_t i>
302 GridVariables<i>& gridVariables(Dune::index_constant<i> domainId)
303 { return *std::get<domainId>(gridVariablesTuple_); }
304
306 template<std::size_t i>
307 const GridVariables<i>& gridVariables(Dune::index_constant<i> domainId) const
308 { return *std::get<domainId>(gridVariablesTuple_); }
309
311 const CouplingManager& couplingManager() const
312 { return *couplingManager_; }
313
315 JacobianMatrix& jacobian()
316 { return *jacobian_; }
317
319 ResidualType& residual()
320 { return *residual_; }
321
323 const SolutionVector& prevSol() const
324 { return *prevSol_; }
325
329 template<std::size_t i>
330 MultiStageFVLocalOperator<LocalResidual<i>> localResidual(Dune::index_constant<i> domainId) const
331 { return { LocalResidual<i>{std::get<domainId>(problemTuple_).get(), nullptr} }; }
332
333 void clearStages()
334 {
335 spatialOperatorEvaluations_.clear();
336 temporalOperatorEvaluations_.clear();
337 stageParams_.reset();
338 }
339
340 template<class StageParams>
341 void prepareStage(SolutionVector& x, StageParams params)
342 {
343 stageParams_ = std::move(params);
344 const auto curStage = stageParams_->size() - 1;
345
346 // in the first stage, also assemble the old residual
347 if (curStage == 1)
348 {
349 // update time in variables?
350 using namespace Dune::Hybrid;
351 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
352 {
353 setProblemTime_(*std::get<domainId>(problemTuple_), stageParams_->timeAtStage(curStage));
354 });
355
356 resetResidual_(); // residual resized and zero
357 spatialOperatorEvaluations_.push_back(*residual_);
358 temporalOperatorEvaluations_.push_back(*residual_);
359
360 // assemble stage 0 residuals
361 using namespace Dune::Hybrid;
362 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
363 {
364 auto& spatial = spatialOperatorEvaluations_.back()[domainId];
365 auto& temporal = temporalOperatorEvaluations_.back()[domainId];
366 assemble_(domainId, [&](const auto& element)
367 {
368 MultiDomainAssemblerSubDomainView view{*this, domainId};
369 SubDomainAssembler<domainId()> subDomainAssembler(view, element, x, *couplingManager_);
370 subDomainAssembler.localResidual().spatialWeight(1.0);
371 subDomainAssembler.localResidual().temporalWeight(1.0);
372 subDomainAssembler.assembleCurrentResidual(spatial, temporal);
373 });
374 });
375 }
376
377 // update time in variables?
378 using namespace Dune::Hybrid;
379 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
380 {
381 setProblemTime_(*std::get<domainId>(problemTuple_), stageParams_->timeAtStage(curStage));
382 });
383
384 resetResidual_(); // residual resized and zero
385 spatialOperatorEvaluations_.push_back(*residual_);
386 temporalOperatorEvaluations_.push_back(*residual_);
387 }
388
390 bool isStationaryProblem() const
391 { return false; }
392
393 bool isImplicit() const
394 { return timeSteppingMethod_->implicit(); }
395
396protected:
398 std::shared_ptr<CouplingManager> couplingManager_;
399
400private:
404 void setJacobianBuildMode_(JacobianMatrix& jac) const
405 {
406 using namespace Dune::Hybrid;
407 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto i)
408 {
409 forEach(jac[i], [&](auto& jacBlock)
410 {
411 using BlockType = std::decay_t<decltype(jacBlock)>;
412 if (jacBlock.buildMode() == BlockType::BuildMode::unknown)
413 jacBlock.setBuildMode(BlockType::BuildMode::random);
414 else if (jacBlock.buildMode() != BlockType::BuildMode::random)
415 DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
416 });
417 });
418 }
419
423 void setJacobianPattern_(JacobianMatrix& jac) const
424 {
425 using namespace Dune::Hybrid;
426 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainI)
427 {
428 forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ)
429 {
430 const auto pattern = this->getJacobianPattern_(domainI, domainJ);
431 pattern.exportIdx(jac[domainI][domainJ]);
432 });
433 });
434 }
435
439 void setResidualSize_(ResidualType& res) const
440 {
441 using namespace Dune::Hybrid;
442 forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId)
443 { res[domainId].resize(this->numDofs(domainId)); });
444 }
445
446 // reset the residual vector to 0.0
447 void resetResidual_()
448 {
449 if(!residual_)
450 {
451 residual_ = std::make_shared<ResidualType>();
452 setResidualSize_(*residual_);
453 }
454
455 setResidualSize_(constrainedDofs_);
456
457 (*residual_) = 0.0;
458 constrainedDofs_ = 0.0;
459 }
460
461 // reset the Jacobian matrix to 0.0
462 void resetJacobian_()
463 {
464 if(!jacobian_)
465 {
466 jacobian_ = std::make_shared<JacobianMatrix>();
467 setJacobianBuildMode_(*jacobian_);
468 setJacobianPattern_(*jacobian_);
469 }
470
471 (*jacobian_) = 0.0;
472 }
473
475 void maybeComputeColors_()
476 {
477 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
478 if (enableMultithreading_)
479 couplingManager_->computeColorsForAssembly();
480 }
481
482 template<std::size_t i, class SubRes>
483 void assembleResidual_(Dune::index_constant<i> domainId, SubRes& subRes,
484 const SolutionVector& curSol)
485 {
486 DUNE_THROW(Dune::NotImplemented, "assembleResidual_");
487 }
488
494 template<std::size_t i, class AssembleElementFunc>
495 void assemble_(Dune::index_constant<i> domainId, AssembleElementFunc&& assembleElement) const
496 {
497 // a state that will be checked on all processes
498 bool succeeded = false;
499
500 // try assembling using the local assembly function
501 try
502 {
503 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
504 {
505 if (enableMultithreading_)
506 {
507 couplingManager_->assembleMultithreaded(
508 domainId, std::forward<AssembleElementFunc>(assembleElement)
509 );
510 return;
511 }
512 }
513
514 // fallback for coupling managers that don't support multithreaded assembly (yet)
515 // or if multithreaded assembly is disabled
516 // let the local assembler add the element contributions
517 for (const auto& element : elements(gridView(domainId)))
518 assembleElement(element);
519
520 // if we get here, everything worked well on this process
521 succeeded = true;
522 }
523 // if a NumericalProblem was thrown, report it and mark this process as failed
524 catch (NumericalProblem &e)
525 {
526 std::cout << "rank " << gridView(domainId).comm().rank()
527 << " caught an exception while assembling:" << e.what()
528 << "\n";
529 succeeded = false;
530 }
531
532 // make sure everything worked well on all processes
533 if (gridView(domainId).comm().size() > 1)
534 succeeded = gridView(domainId).comm().min(succeeded);
535
536 // if not succeeded rethrow the error on all processes
537 if (!succeeded)
538 DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
539 }
540
541 // get diagonal block pattern
542 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i==j), int> = 0>
543 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
544 Dune::index_constant<j> domainJ) const
545 {
546 const auto& gg = gridGeometry(domainI);
547 if (timeSteppingMethod_->implicit())
548 {
549 auto pattern = getJacobianPattern<true>(gg);
550 couplingManager_->extendJacobianPattern(domainI, pattern);
551 return pattern;
552 }
553 else
554 {
555 auto pattern = getJacobianPattern<false>(gg);
556 couplingManager_->extendJacobianPattern(domainI, pattern);
557 return pattern;
558 }
559 }
560
561 // get coupling block pattern
562 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i!=j), int> = 0>
563 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
564 Dune::index_constant<j> domainJ) const
565 {
566 if (timeSteppingMethod_->implicit())
567 return getCouplingJacobianPattern<true>(*couplingManager_,
568 domainI, gridGeometry(domainI),
569 domainJ, gridGeometry(domainJ)
570 );
571 else
572 return getCouplingJacobianPattern<false>(*couplingManager_,
573 domainI, gridGeometry(domainI),
574 domainJ, gridGeometry(domainJ)
575 );
576 }
577
578 // TODO make this nicer with an is_detected trait in a common location
579 template<class P>
580 void setProblemTime_(const P& p, const Scalar t)
581 { setProblemTimeImpl_(p, t, 0); }
582
583 template<class P>
584 auto setProblemTimeImpl_(const P& p, const Scalar t, int) -> decltype(p.setTime(0))
585 { p.setTime(t); }
586
587 template<class P>
588 void setProblemTimeImpl_(const P& p, const Scalar t, long)
589 {}
590
591 std::shared_ptr<const Experimental::MultiStageMethod<Scalar>> timeSteppingMethod_;
592 std::vector<ResidualType> spatialOperatorEvaluations_;
593 std::vector<ResidualType> temporalOperatorEvaluations_;
594 ResidualType constrainedDofs_;
595 std::shared_ptr<const StageParams> stageParams_;
596
598 ProblemTuple problemTuple_;
599
601 GridGeometryTuple gridGeometryTuple_;
602
604 GridVariablesTuple gridVariablesTuple_;
605
607 const SolutionVector* prevSol_;
608
610 std::shared_ptr<JacobianMatrix> jacobian_;
611 std::shared_ptr<ResidualType> residual_;
612
614 bool enableMultithreading_ = false;
615};
616
617} // end namespace Dumux::Experimental
618
619#endif
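
The trait CouplingManagerSupportsMultithreadedAssembly declared above defaults to std::false_type, so the assembler always falls back to the sequential element loop unless a coupling manager explicitly opts in. A minimal opt-in sketch, assuming a hypothetical user-defined manager MyCouplingManager that implements the computeColorsForAssembly() and assembleMultithreaded() hooks called in assemble_() (the include path is likewise an assumption):

// Sketch only; MyCouplingManager and the include path are assumptions, not DuMux sources.
#include <type_traits>
#include <dumux/experimental/assembly/multistagemultidomainfvassembler.hh>

class MyCouplingManager; // assumed to provide computeColorsForAssembly() and assembleMultithreaded()

namespace Dumux {

// opt this coupling manager into coloring and multithreaded assembly
template<>
struct CouplingManagerSupportsMultithreadedAssembly<MyCouplingManager>
: public std::true_type
{};

} // end namespace Dumux

With such a specialization in place, enableMultithreading_ can become true in the constructor (provided all grids support multithreading and Assembly.Multithreading is not disabled), and assemble_() then dispatches to couplingManager_->assembleMultithreaded() instead of the plain element loop.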
Subdomain-specific views on multidomain assemblers.
Definition: multistagefvlocaloperator.hh:23
Abstract interface for one-step multi-stage method parameters in Shu/Osher form.
Definition: multistagemethods.hh:75
A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa,...
Definition: multistagemultidomainfvassembler.hh:95
MDTraits Traits
Definition: multistagemultidomainfvassembler.hh:100
typename MDTraits::SolutionVector SolutionVector
Definition: multistagemultidomainfvassembler.hh:119
GetPropType< SubDomainTypeTag< id >, Properties::LocalResidual > LocalResidual
TODO get rid of this GetPropType.
Definition: multistagemultidomainfvassembler.hh:107
typename MDTraits::JacobianMatrix JacobianMatrix
Definition: multistagemultidomainfvassembler.hh:118
typename MDTraits::Scalar Scalar
Definition: multistagemultidomainfvassembler.hh:102
bool isStationaryProblem() const
TODO get rid of this (called by Newton but shouldn't be necessary)
Definition: multistagemultidomainfvassembler.hh:390
void assembleJacobianAndResidual(const SolutionVector &curSol)
Assembles the global Jacobian of the residual and the residual for the current solution.
Definition: multistagemultidomainfvassembler.hh:192
const GridVariables< i > & gridVariables(Dune::index_constant< i > domainId) const
the grid variables of domain i
Definition: multistagemultidomainfvassembler.hh:307
typename MDTraits::template SubDomain< id >::GridGeometry GridGeometry
Definition: multistagemultidomainfvassembler.hh:113
void clearStages()
Definition: multistagemultidomainfvassembler.hh:333
MultiStageFVLocalOperator< LocalResidual< i > > localResidual(Dune::index_constant< i > domainId) const
Create a local residual object (used by the local assembler)
Definition: multistagemultidomainfvassembler.hh:330
typename MDTraits::template SubDomain< id >::Problem Problem
Definition: multistagemultidomainfvassembler.hh:116
bool isImplicit() const
Definition: multistagemultidomainfvassembler.hh:393
std::shared_ptr< CouplingManager > couplingManager_
the coupling manager coupling the sub domains
Definition: multistagemultidomainfvassembler.hh:398
typename MDTraits::ResidualVector ResidualType
Definition: multistagemultidomainfvassembler.hh:120
const CouplingManager & couplingManager() const
the coupling manager
Definition: multistagemultidomainfvassembler.hh:311
const auto & gridView(Dune::index_constant< i > domainId) const
the grid view of domain i
Definition: multistagemultidomainfvassembler.hh:297
ResidualType & residual()
the full residual vector
Definition: multistagemultidomainfvassembler.hh:319
const SolutionVector & prevSol() const
the solution before time integration
Definition: multistagemultidomainfvassembler.hh:323
void setLinearSystem()
The version without arguments uses the default constructor to create the jacobian and residual object...
Definition: multistagemultidomainfvassembler.hh:250
void resetTimeStep(const SolutionVector &curSol)
Resets the grid variables to the last time step.
Definition: multistagemultidomainfvassembler.hh:273
typename MDTraits::template SubDomain< id >::GridVariables GridVariables
Definition: multistagemultidomainfvassembler.hh:110
CMType CouplingManager
Definition: multistagemultidomainfvassembler.hh:122
std::size_t numDofs(Dune::index_constant< i > domainId) const
the number of dof locations of domain i
Definition: multistagemultidomainfvassembler.hh:282
void updateGridVariables(const SolutionVector &curSol)
Updates the grid variables with the given solution.
Definition: multistagemultidomainfvassembler.hh:263
const auto & problem(Dune::index_constant< i > domainId) const
the problem of domain i
Definition: multistagemultidomainfvassembler.hh:287
const auto & gridGeometry(Dune::index_constant< i > domainId) const
the finite volume grid geometry of domain i
Definition: multistagemultidomainfvassembler.hh:292
void prepareStage(SolutionVector &x, StageParams params)
Definition: multistagemultidomainfvassembler.hh:341
JacobianMatrix & jacobian()
the full Jacobian matrix
Definition: multistagemultidomainfvassembler.hh:315
GridVariables< i > & gridVariables(Dune::index_constant< i > domainId)
the grid variables of domain i
Definition: multistagemultidomainfvassembler.hh:302
MultiStageMultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager, std::shared_ptr< const Experimental::MultiStageMethod< Scalar > > msMethod, const SolutionVector &prevSol)
The constructor for instationary problems.
Definition: multistagemultidomainfvassembler.hh:165
void assembleResidual(const SolutionVector &curSol)
compute the residuals using the internal residual
Definition: multistagemultidomainfvassembler.hh:243
Data object for the parameters of a given stage.
Definition: multistagetimestepper.hh:29
The cell-centered scheme multidomain local assembler.
Definition: experimental/assembly/subdomaincclocalassembler.hh:195
The CVFE scheme multidomain local assembler.
Definition: experimental/assembly/subdomaincvfelocalassembler.hh:206
Subdomain-specific view on a multidomain assembler. Allows retrieval of sub-domain specific objects w...
Definition: assemblerview.hh:31
Defines all properties used in Dumux.
Helper function to generate Jacobian pattern for multi domain models.
An enum class to define various differentiation methods available in order to compute the derivatives...
Some exceptions thrown in DuMux
A multidomain local assembler for Jacobian and residual contribution per element (cell-centered metho...
An assembler for Jacobian and residual contribution per element (CVFE methods) for multidomain proble...
dune-grid capabilities compatibility layer
constexpr bool isSerial()
Checking whether the backend is serial.
Definition: multithreading.hh:45
typename GetProp< TypeTag, Property >::type GetPropType
get the type alias defined in the property
Definition: propertysystem.hh:296
Helper function to generate Jacobian pattern for different discretization methods.
The available discretization methods in Dumux.
A local operator wrapper for multi-stage time stepping schemes.
Parameters for different multistage time stepping methods.
A time stepper performing a single time step of a transient simulation.
Multithreading in Dumux.
Definition: experimental/assembly/cclocalassembler.hh:36
bool allGridsSupportsMultithreadingImpl(const T &gridGeometries, std::index_sequence< I... >)
Definition: multistagemultidomainfvassembler.hh:52
Definition: gridcapabilities.hh:57
bool allGridsSupportsMultithreading(const std::tuple< GG... > &gridGeometries)
Definition: multistagemultidomainfvassembler.hh:60
bool supportsMultithreading(const GridView &gridView)
Definition: gridcapabilities.hh:74
Definition: adapt.hh:17
Provides a helper class for nonoverlapping decomposition.
Type trait that is specialized for coupling manager supporting multithreaded assembly.
Definition: multistagemultidomainfvassembler.hh:78
Utilities for template meta programming.
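
A rough construction sketch for the assembler itself, assuming the usual multidomain setup has already produced the problems, grid geometries, grid variables, a coupling manager, a time stepping method and the previous solution; all lower-case names below are placeholders, and Traits/CouplingManager stand for the user's MultiDomainTraits and coupling manager types:

// Hypothetical setup; the right-hand-side names are assumptions, not DuMux API.
using Assembler = Dumux::Experimental::MultiStageMultiDomainFVAssembler<
    Traits, CouplingManager, Dumux::DiffMethod::numeric>;

auto assembler = std::make_shared<Assembler>(
    problems, gridGeometries, gridVariables,   // tuples of shared_ptr, one entry per subdomain
    couplingManager,                           // shared_ptr to the coupling manager
    timeMethod,                                // shared_ptr<const Experimental::MultiStageMethod<double>>
    xOld);                                     // solution at the previous time level

assembler->setLinearSystem();   // allocates the Jacobian (pattern, build mode) and the residual

The stage workflow (prepareStage, assembleJacobianAndResidual, clearStages) is normally driven by the multi-stage time stepper together with a Newton-type solver rather than called by hand.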