version 3.10-dev
multistagemultidomainfvassembler.hh
Go to the documentation of this file.
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
//
// SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
// SPDX-License-Identifier: GPL-3.0-or-later
//
#ifndef DUMUX_EXPERIMENTAL_MULTISTAGE_MULTIDOMAIN_FV_ASSEMBLER_HH
#define DUMUX_EXPERIMENTAL_MULTISTAGE_MULTIDOMAIN_FV_ASSEMBLER_HH

#include <iostream>
#include <memory>
#include <tuple>
#include <type_traits>
#include <vector>

#include <dune/common/hybridutilities.hh>
#include <dune/istl/matrixindexset.hh>
25
30
36
39
43
46
48
namespace Detail {
// helper for multi-domain models:
// fold over all sub-domain grid geometries and check that every grid view
// supports multithreaded iteration
template<class T, std::size_t... I>
bool allGridsSupportsMultithreadingImpl(const T& gridGeometries, std::index_sequence<I...>)
{
    return (... && supportsMultithreading(std::get<I>(gridGeometries)->gridView()));
}
} // end namespace Detail
57
58// helper for multi-domain models (all grids have to support multithreading)
59template<class... GG>
60bool allGridsSupportsMultithreading(const std::tuple<GG...>& gridGeometries)
61{
62 return Detail::allGridsSupportsMultithreadingImpl<std::tuple<GG...>>(gridGeometries, std::make_index_sequence<sizeof...(GG)>());
63}
64
65} // end namespace Dumux::Grid::Capabilities
66
namespace Dumux {

/*!
 * \brief Type trait that is specialized (to std::true_type) for coupling managers
 *        that support multithreaded assembly; defaults to false for all others.
 */
template<class CM>
struct CouplingManagerSupportsMultithreadedAssembly : public std::false_type
{};

} // end namespace Dumux
81
82namespace Dumux::Experimental {
83
93template<class MDTraits, class CMType, DiffMethod diffMethod>
95{
96 template<std::size_t id>
97 using SubDomainTypeTag = typename MDTraits::template SubDomain<id>::TypeTag;
98
99public:
100 using Traits = MDTraits;
101
102 using Scalar = typename MDTraits::Scalar;
104
106 template<std::size_t id>
107 using LocalResidual = GetPropType<SubDomainTypeTag<id>, Properties::LocalResidual>;
108
109 template<std::size_t id>
110 using GridVariables = typename MDTraits::template SubDomain<id>::GridVariables;
111
112 template<std::size_t id>
113 using GridGeometry = typename MDTraits::template SubDomain<id>::GridGeometry;
114
115 template<std::size_t id>
116 using Problem = typename MDTraits::template SubDomain<id>::Problem;
117
118 using JacobianMatrix = typename MDTraits::JacobianMatrix;
119 using SolutionVector = typename MDTraits::SolutionVector;
120 using ResidualType = typename MDTraits::ResidualVector;
121
122 using CouplingManager = CMType;
123
124private:
125
126 using ProblemTuple = typename MDTraits::template TupleOfSharedPtrConst<Problem>;
127 using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst<GridGeometry>;
128 using GridVariablesTuple = typename MDTraits::template TupleOfSharedPtr<GridVariables>;
129
131
132 template<std::size_t id>
134
135 template<class DiscretizationMethod, std::size_t id>
136 struct SubDomainAssemblerType;
137
138 template<std::size_t id>
139 struct SubDomainAssemblerType<DiscretizationMethods::CCTpfa, id>
140 {
142 };
143
144 template<std::size_t id>
145 struct SubDomainAssemblerType<DiscretizationMethods::CCMpfa, id>
146 {
147 using type = Experimental::SubDomainCCLocalAssembler<id, SubDomainTypeTag<id>, SubDomainAssemblerView<id>, diffMethod>;
148 };
149
150 template<std::size_t id, class DM>
151 struct SubDomainAssemblerType<DiscretizationMethods::CVFE<DM>, id>
152 {
153 using type = Experimental::SubDomainCVFELocalAssembler<id, SubDomainTypeTag<id>, SubDomainAssemblerView<id>, diffMethod>;
154 };
155
156 template<std::size_t id>
157 using SubDomainAssembler = typename SubDomainAssemblerType<typename GridGeometry<id>::DiscretizationMethod, id>::type;
158
159public:
166 GridGeometryTuple gridGeometry,
167 GridVariablesTuple gridVariables,
168 std::shared_ptr<CouplingManager> couplingManager,
169 std::shared_ptr<const Experimental::MultiStageMethod<Scalar>> msMethod,
170 const SolutionVector& prevSol)
172 , timeSteppingMethod_(msMethod)
173 , problemTuple_(std::move(problem))
174 , gridGeometryTuple_(std::move(gridGeometry))
175 , gridVariablesTuple_(std::move(gridVariables))
176 , prevSol_(&prevSol)
177 {
178 std::cout << "Instantiated assembler for an instationary problem." << std::endl;
179
183 && getParam<bool>("Assembly.Multithreading", true);
184
185 maybeComputeColors_();
186 }
187
193 {
194 resetJacobian_();
195
196 resetResidual_();
197 spatialOperatorEvaluations_.back() = 0.0;
198 temporalOperatorEvaluations_.back() = 0.0;
199
200 if (stageParams_->size() != spatialOperatorEvaluations_.size())
201 DUNE_THROW(Dune::InvalidStateException, "Wrong number of residuals");
202
203 using namespace Dune::Hybrid;
204 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
205 {
206 // assemble the spatial and temporal residual of the current time step and the Jacobian
207 // w.r.t to the current solution (the current solution on the current stage)
208 auto& jacRow = (*jacobian_)[domainId];
209 auto& spatial = spatialOperatorEvaluations_.back()[domainId];
210 auto& temporal = temporalOperatorEvaluations_.back()[domainId];
211
212 assemble_(domainId, [&](const auto& element)
213 {
214 MultiDomainAssemblerSubDomainView view{*this, domainId};
215 SubDomainAssembler<domainId()> subDomainAssembler(view, element, curSol, *couplingManager_);
216 subDomainAssembler.assembleJacobianAndResidual(
217 jacRow, (*residual_)[domainId],
218 gridVariablesTuple_,
219 *stageParams_, temporal, spatial,
220 constrainedDofs_[domainId]
221 );
222 });
223
224 // assemble the full residual for the time integration stage
225 auto constantResidualComponent = (*residual_)[domainId];
226 constantResidualComponent = 0.0;
227 for (std::size_t k = 0; k < stageParams_->size()-1; ++k)
228 {
229 if (!stageParams_->skipTemporal(k))
230 constantResidualComponent.axpy(stageParams_->temporalWeight(k), temporalOperatorEvaluations_[k][domainId]);
231 if (!stageParams_->skipSpatial(k))
232 constantResidualComponent.axpy(stageParams_->spatialWeight(k), spatialOperatorEvaluations_[k][domainId]);
233 }
234
235 // masked summation of constant residual component onto this stage's resiudal component
236 for (std::size_t i = 0; i < constantResidualComponent.size(); ++i)
237 for (std::size_t ii = 0; ii < constantResidualComponent[i].size(); ++ii)
238 (*residual_)[domainId][i][ii] += constrainedDofs_[domainId][i][ii] > 0.5 ? 0.0 : constantResidualComponent[i][ii];
239 });
240 }
241
244 { DUNE_THROW(Dune::NotImplemented, "residual"); }
245
251 {
252 jacobian_ = std::make_shared<JacobianMatrix>();
253 residual_ = std::make_shared<ResidualType>();
254
255 setJacobianBuildMode_(*jacobian_);
256 setJacobianPattern_(*jacobian_);
257 setResidualSize_(*residual_);
258 }
259
264 {
265 using namespace Dune::Hybrid;
266 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
267 { this->gridVariables(domainId).update(curSol[domainId]); });
268 }
269
273 void resetTimeStep(const SolutionVector& curSol)
274 {
275 using namespace Dune::Hybrid;
276 forEach(integralRange(Dune::Hybrid::size(gridVariablesTuple_)), [&](const auto domainId)
277 { this->gridVariables(domainId).resetTimeStep(curSol[domainId]); });
278
279 this->clearStages();
280 }
281
283 template<std::size_t i>
284 std::size_t numDofs(Dune::index_constant<i> domainId) const
285 { return std::get<domainId>(gridGeometryTuple_)->numDofs(); }
286
288 template<std::size_t i>
289 const auto& problem(Dune::index_constant<i> domainId) const
290 { return *std::get<domainId>(problemTuple_); }
291
293 template<std::size_t i>
294 const auto& gridGeometry(Dune::index_constant<i> domainId) const
295 { return *std::get<domainId>(gridGeometryTuple_); }
296
298 template<std::size_t i>
299 const auto& gridView(Dune::index_constant<i> domainId) const
300 { return gridGeometry(domainId).gridView(); }
301
303 template<std::size_t i>
304 GridVariables<i>& gridVariables(Dune::index_constant<i> domainId)
305 { return *std::get<domainId>(gridVariablesTuple_); }
306
308 template<std::size_t i>
309 const GridVariables<i>& gridVariables(Dune::index_constant<i> domainId) const
310 { return *std::get<domainId>(gridVariablesTuple_); }
311
314 { return *couplingManager_; }
315
318 { return *jacobian_; }
319
322 { return *residual_; }
323
325 const SolutionVector& prevSol() const
326 { return *prevSol_; }
327
331 template<std::size_t i>
332 MultiStageFVLocalOperator<LocalResidual<i>> localResidual(Dune::index_constant<i> domainId) const
333 { return { LocalResidual<i>{std::get<domainId>(problemTuple_).get(), nullptr} }; }
334
336 {
337 spatialOperatorEvaluations_.clear();
338 temporalOperatorEvaluations_.clear();
339 stageParams_.reset();
340 }
341
342 template<class StageParams>
344 {
345 stageParams_ = std::move(params);
346 const auto curStage = stageParams_->size() - 1;
347
348 // in the first stage, also assemble the old residual
349 if (curStage == 1)
350 {
351 // update time in variables?
352 using namespace Dune::Hybrid;
353 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
354 {
355 setProblemTime_(*std::get<domainId>(problemTuple_), stageParams_->timeAtStage(curStage));
356 });
357
358 resetResidual_(); // residual resized and zero
359 spatialOperatorEvaluations_.push_back(*residual_);
360 temporalOperatorEvaluations_.push_back(*residual_);
361
362 // assemble stage 0 residuals
363 using namespace Dune::Hybrid;
364 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
365 {
366 auto& spatial = spatialOperatorEvaluations_.back()[domainId];
367 auto& temporal = temporalOperatorEvaluations_.back()[domainId];
368 assemble_(domainId, [&](const auto& element)
369 {
370 MultiDomainAssemblerSubDomainView view{*this, domainId};
371 SubDomainAssembler<domainId()> subDomainAssembler(view, element, x, *couplingManager_);
372 subDomainAssembler.localResidual().spatialWeight(1.0);
373 subDomainAssembler.localResidual().temporalWeight(1.0);
374 subDomainAssembler.assembleCurrentResidual(spatial, temporal);
375 });
376 });
377 }
378
379 // update time in variables?
380 using namespace Dune::Hybrid;
381 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainId)
382 {
383 setProblemTime_(*std::get<domainId>(problemTuple_), stageParams_->timeAtStage(curStage));
384 });
385
386 resetResidual_(); // residual resized and zero
387 spatialOperatorEvaluations_.push_back(*residual_);
388 temporalOperatorEvaluations_.push_back(*residual_);
389 }
390
393 { return false; }
394
395 bool isImplicit() const
396 { return timeSteppingMethod_->implicit(); }
397
398protected:
400 std::shared_ptr<CouplingManager> couplingManager_;
401
402private:
406 void setJacobianBuildMode_(JacobianMatrix& jac) const
407 {
408 using namespace Dune::Hybrid;
409 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto i)
410 {
411 forEach(jac[i], [&](auto& jacBlock)
412 {
413 using BlockType = std::decay_t<decltype(jacBlock)>;
414 if (jacBlock.buildMode() == BlockType::BuildMode::unknown)
415 jacBlock.setBuildMode(BlockType::BuildMode::random);
416 else if (jacBlock.buildMode() != BlockType::BuildMode::random)
417 DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
418 });
419 });
420 }
421
425 void setJacobianPattern_(JacobianMatrix& jac) const
426 {
427 using namespace Dune::Hybrid;
428 forEach(std::make_index_sequence<JacobianMatrix::N()>(), [&](const auto domainI)
429 {
430 forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ)
431 {
432 const auto pattern = this->getJacobianPattern_(domainI, domainJ);
433 pattern.exportIdx(jac[domainI][domainJ]);
434 });
435 });
436 }
437
441 void setResidualSize_(ResidualType& res) const
442 {
443 using namespace Dune::Hybrid;
444 forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId)
445 { res[domainId].resize(this->numDofs(domainId)); });
446 }
447
448 // reset the residual vector to 0.0
449 void resetResidual_()
450 {
451 if(!residual_)
452 {
453 residual_ = std::make_shared<ResidualType>();
454 setResidualSize_(*residual_);
455 }
456
457 setResidualSize_(constrainedDofs_);
458
459 (*residual_) = 0.0;
460 constrainedDofs_ = 0.0;
461 }
462
463 // reset the jacobian vector to 0.0
464 void resetJacobian_()
465 {
466 if(!jacobian_)
467 {
468 jacobian_ = std::make_shared<JacobianMatrix>();
469 setJacobianBuildMode_(*jacobian_);
470 setJacobianPattern_(*jacobian_);
471 }
472
473 (*jacobian_) = 0.0;
474 }
475
477 void maybeComputeColors_()
478 {
479 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
480 if (enableMultithreading_)
481 couplingManager_->computeColorsForAssembly();
482 }
483
484 template<std::size_t i, class SubRes>
485 void assembleResidual_(Dune::index_constant<i> domainId, SubRes& subRes,
486 const SolutionVector& curSol)
487 {
488 DUNE_THROW(Dune::NotImplemented, "assembleResidual_");
489 }
490
496 template<std::size_t i, class AssembleElementFunc>
497 void assemble_(Dune::index_constant<i> domainId, AssembleElementFunc&& assembleElement) const
498 {
499 // a state that will be checked on all processes
500 bool succeeded = false;
501
502 // try assembling using the local assembly function
503 try
504 {
505 if constexpr (CouplingManagerSupportsMultithreadedAssembly<CouplingManager>::value)
506 {
507 if (enableMultithreading_)
508 {
509 couplingManager_->assembleMultithreaded(
510 domainId, std::forward<AssembleElementFunc>(assembleElement)
511 );
512 return;
513 }
514 }
515
516 // fallback for coupling managers that don't support multithreaded assembly (yet)
517 // or if multithreaded assembly is disabled
518 // let the local assembler add the element contributions
519 for (const auto& element : elements(gridView(domainId)))
520 assembleElement(element);
521
522 // if we get here, everything worked well on this process
523 succeeded = true;
524 }
525 // throw exception if a problem occurred
526 catch (NumericalProblem &e)
527 {
528 std::cout << "rank " << gridView(domainId).comm().rank()
529 << " caught an exception while assembling:" << e.what()
530 << "\n";
531 succeeded = false;
532 }
533
534 // make sure everything worked well on all processes
535 if (gridView(domainId).comm().size() > 1)
536 succeeded = gridView(domainId).comm().min(succeeded);
537
538 // if not succeeded rethrow the error on all processes
539 if (!succeeded)
540 DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
541 }
542
543 // get diagonal block pattern
544 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i==j), int> = 0>
545 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
546 Dune::index_constant<j> domainJ) const
547 {
548 const auto& gg = gridGeometry(domainI);
549 if (timeSteppingMethod_->implicit())
550 {
551 auto pattern = getJacobianPattern<true>(gg);
552 couplingManager_->extendJacobianPattern(domainI, pattern);
553 return pattern;
554 }
555 else
556 {
557 auto pattern = getJacobianPattern<false>(gg);
558 couplingManager_->extendJacobianPattern(domainI, pattern);
559 return pattern;
560 }
561 }
562
563 // get coupling block pattern
564 template<std::size_t i, std::size_t j, typename std::enable_if_t<(i!=j), int> = 0>
565 Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant<i> domainI,
566 Dune::index_constant<j> domainJ) const
567 {
568 if (timeSteppingMethod_->implicit())
569 return getCouplingJacobianPattern<true>(*couplingManager_,
570 domainI, gridGeometry(domainI),
571 domainJ, gridGeometry(domainJ)
572 );
573 else
574 return getCouplingJacobianPattern<false>(*couplingManager_,
575 domainI, gridGeometry(domainI),
576 domainJ, gridGeometry(domainJ)
577 );
578 }
579
580 // TODO make this nicer with a is_detected trait in a common location
581 template<class P>
582 void setProblemTime_(const P& p, const Scalar t)
583 { setProblemTimeImpl_(p, t, 0); }
584
585 template<class P>
586 auto setProblemTimeImpl_(const P& p, const Scalar t, int) -> decltype(p.setTime(0))
587 { p.setTime(t); }
588
589 template<class P>
590 void setProblemTimeImpl_(const P& p, const Scalar t, long)
591 {}
592
593 std::shared_ptr<const Experimental::MultiStageMethod<Scalar>> timeSteppingMethod_;
594 std::vector<ResidualType> spatialOperatorEvaluations_;
595 std::vector<ResidualType> temporalOperatorEvaluations_;
596 ResidualType constrainedDofs_;
597 std::shared_ptr<const StageParams> stageParams_;
598
600 ProblemTuple problemTuple_;
601
603 GridGeometryTuple gridGeometryTuple_;
604
606 GridVariablesTuple gridVariablesTuple_;
607
609 const SolutionVector* prevSol_;
610
612 std::shared_ptr<JacobianMatrix> jacobian_;
613 std::shared_ptr<ResidualType> residual_;
614
616 bool enableMultithreading_ = false;
617};
618
619} // end namespace Dumux
620
621#endif
Subdomain-specific views on multidomain assemblers.
Definition: multistagefvlocaloperator.hh:23
Abstract interface for one-step multi-stage method parameters in Shu/Osher form.
Definition: multistagemethods.hh:75
A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa,...
Definition: multistagemultidomainfvassembler.hh:95
MDTraits Traits
Definition: multistagemultidomainfvassembler.hh:100
typename MDTraits::SolutionVector SolutionVector
Definition: multistagemultidomainfvassembler.hh:119
GetPropType< SubDomainTypeTag< id >, Properties::LocalResidual > LocalResidual
TODO get rid of this GetPropType.
Definition: multistagemultidomainfvassembler.hh:107
typename MDTraits::JacobianMatrix JacobianMatrix
Definition: multistagemultidomainfvassembler.hh:118
typename MDTraits::Scalar Scalar
Definition: multistagemultidomainfvassembler.hh:102
bool isStationaryProblem() const
TODO get rid of this (called by Newton but shouldn't be necessary)
Definition: multistagemultidomainfvassembler.hh:392
void assembleJacobianAndResidual(const SolutionVector &curSol)
Assembles the global Jacobian of the residual and the residual for the current solution.
Definition: multistagemultidomainfvassembler.hh:192
const GridVariables< i > & gridVariables(Dune::index_constant< i > domainId) const
the grid variables of domain i
Definition: multistagemultidomainfvassembler.hh:309
typename MDTraits::template SubDomain< id >::GridGeometry GridGeometry
Definition: multistagemultidomainfvassembler.hh:113
void clearStages()
Definition: multistagemultidomainfvassembler.hh:335
MultiStageFVLocalOperator< LocalResidual< i > > localResidual(Dune::index_constant< i > domainId) const
Create a local residual object (used by the local assembler)
Definition: multistagemultidomainfvassembler.hh:332
typename MDTraits::template SubDomain< id >::Problem Problem
Definition: multistagemultidomainfvassembler.hh:116
bool isImplicit() const
Definition: multistagemultidomainfvassembler.hh:395
std::shared_ptr< CouplingManager > couplingManager_
the coupling manager coupling the sub domains
Definition: multistagemultidomainfvassembler.hh:400
typename MDTraits::ResidualVector ResidualType
Definition: multistagemultidomainfvassembler.hh:120
const CouplingManager & couplingManager() const
the coupling manager
Definition: multistagemultidomainfvassembler.hh:313
const auto & gridView(Dune::index_constant< i > domainId) const
the grid view of domain i
Definition: multistagemultidomainfvassembler.hh:299
ResidualType & residual()
the full residual vector
Definition: multistagemultidomainfvassembler.hh:321
const SolutionVector & prevSol() const
the solution before time integration
Definition: multistagemultidomainfvassembler.hh:325
void setLinearSystem()
The version without arguments uses the default constructor to create the jacobian and residual object...
Definition: multistagemultidomainfvassembler.hh:250
void resetTimeStep(const SolutionVector &curSol)
Resets the grid variables to the last time step.
Definition: multistagemultidomainfvassembler.hh:273
typename MDTraits::template SubDomain< id >::GridVariables GridVariables
Definition: multistagemultidomainfvassembler.hh:110
CMType CouplingManager
Definition: multistagemultidomainfvassembler.hh:122
std::size_t numDofs(Dune::index_constant< i > domainId) const
the number of dof locations of domain i
Definition: multistagemultidomainfvassembler.hh:284
void updateGridVariables(const SolutionVector &curSol)
Updates the grid variables with the given solution.
Definition: multistagemultidomainfvassembler.hh:263
const auto & problem(Dune::index_constant< i > domainId) const
the problem of domain i
Definition: multistagemultidomainfvassembler.hh:289
const auto & gridGeometry(Dune::index_constant< i > domainId) const
the finite volume grid geometry of domain i
Definition: multistagemultidomainfvassembler.hh:294
void prepareStage(SolutionVector &x, StageParams params)
Definition: multistagemultidomainfvassembler.hh:343
JacobianMatrix & jacobian()
the full Jacobian matrix
Definition: multistagemultidomainfvassembler.hh:317
GridVariables< i > & gridVariables(Dune::index_constant< i > domainId)
the grid variables of domain i
Definition: multistagemultidomainfvassembler.hh:304
MultiStageMultiDomainFVAssembler(ProblemTuple problem, GridGeometryTuple gridGeometry, GridVariablesTuple gridVariables, std::shared_ptr< CouplingManager > couplingManager, std::shared_ptr< const Experimental::MultiStageMethod< Scalar > > msMethod, const SolutionVector &prevSol)
The constructor for instationary problems.
Definition: multistagemultidomainfvassembler.hh:165
void assembleResidual(const SolutionVector &curSol)
compute the residuals using the internal residual
Definition: multistagemultidomainfvassembler.hh:243
Data object for the parameters of a given stage.
Definition: multistagetimestepper.hh:31
The cell-centered scheme multidomain local assembler.
Definition: experimental/assembly/subdomaincclocalassembler.hh:195
The CVFE scheme multidomain local assembler.
Definition: experimental/assembly/subdomaincvfelocalassembler.hh:206
Subdomain-specific view on a multidomain assembler. Allows retrieval of sub-domain specific objects w...
Definition: assemblerview.hh:31
Defines all properties used in Dumux.
Helper function to generate Jacobian pattern for multi domain models.
An enum class to define various differentiation methods available in order to compute the derivatives...
Some exceptions thrown in DuMux
A multidomain local assembler for Jacobian and residual contribution per element (cell-centered metho...
An assembler for Jacobian and residual contribution per element (CVFE methods) for multidomain proble...
dune-grid capabilities compatibility layer
constexpr bool isSerial()
Checking whether the backend is serial.
Definition: multithreading.hh:45
typename GetProp< TypeTag, Property >::type GetPropType
get the type alias defined in the property
Definition: propertysystem.hh:296
Helper function to generate Jacobian pattern for different discretization methods.
The available discretization methods in Dumux.
A local operator wrapper for multi-stage time stepping schemes.
Parameters for different multistage time stepping methods.
A time stepper performing a single time step of a transient simulation.
Multithreading in Dumux.
Definition: experimental/assembly/cclocalassembler.hh:36
bool allGridsSupportsMultithreadingImpl(const T &gridGeometries, std::index_sequence< I... >)
Definition: multistagemultidomainfvassembler.hh:52
Definition: gridcapabilities.hh:57
bool allGridsSupportsMultithreading(const std::tuple< GG... > &gridGeometries)
Definition: multistagemultidomainfvassembler.hh:60
bool supportsMultithreading(const GridView &gridView)
Definition: gridcapabilities.hh:74
Definition: adapt.hh:17
Provides a helper class for nonoverlapping decomposition.
Type trait that is specialized for coupling manager supporting multithreaded assembly.
Definition: multistagemultidomainfvassembler.hh:78
Utilities for template meta programming.