3 #ifndef DUNE_ISTL_FASTAMG_HH
4 #define DUNE_ISTL_FASTAMG_HH
7 #include <dune/common/exceptions.hh>
8 #include <dune/common/typetraits.hh>
9 #include <dune/common/unused.hh>
// Template parameters of the FastAMG class (class header elided in this view):
//   M  - the (assembled) linear operator / matrix type
//   X  - the vector type used for domain and range
//   PI - parallel information; defaults to sequential (and only sequential
//        runs are supported, see the static_asserts in the constructors)
//   A  - allocator used for the internal hierarchies
57 template<
class M,
class X,
class PI=SequentialInformation,
class A=std::allocator<X> >
// Rebuild the coarse-level Galerkin products of the whole matrix hierarchy.
// NOTE(review): the NegateSet of the owner set presumably selects the
// non-owner (copy) partition for the recalculation — confirm against
// OperatorHierarchy::recalculateGalerkin. (Enclosing function elided here.)
151 matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
// Build matrix and parallel-information hierarchies from a fine-level
// operator according to the given coarsening criterion.
168 void createHierarchies(C& criterion,
169 const std::shared_ptr<const Operator>& matrixptr,
// Iterator types used by LevelContext to walk the per-level redistribution
// info and aggregates maps (surrounding struct/context elided in this view).
191 typename OperatorHierarchy::RedistributeInfoList::const_iterator
redist;
195 typename OperatorHierarchy::AggregatesMapList::const_iterator
aggregates;
// One multigrid cycle on the current level (recursive V/W-cycle worker).
215 void mgc(LevelContext& levelContext,
Domain& x,
const Range& b);
// Apply pre-smoothing on the current level.
223 void presmooth(LevelContext& levelContext,
Domain& x,
const Range& b);
// Apply post-smoothing on the current level.
231 void postsmooth(LevelContext& levelContext,
Domain& x,
const Range& b);
// Prolongate the coarse correction back to the finer level.
239 void moveToFineLevel(LevelContext& levelContext,
bool processedFineLevel,
// Restrict residual/context to the next coarser level; returns whether the
// coarser level should actually be processed (see definition below).
246 bool moveToCoarseLevel(LevelContext& levelContext);
// Point all LevelContext iterators at the finest level of the hierarchies.
252 void initIteratorsWithFineLevel(LevelContext& levelContext);
// The operator (matrix) hierarchy.
255 std::shared_ptr<OperatorHierarchy> matrices_;
// Solver used on the coarsest level.
257 std::shared_ptr<CoarseSolver> solver_;
// Per-level right-hand sides, iterates and residuals.
259 std::shared_ptr<Hierarchy<Range,A>> rhs_;
261 std::shared_ptr<Hierarchy<Domain,A>> lhs_;
263 std::shared_ptr<Hierarchy<Domain,A>> residual_;
// Scalar product matching the parallel data decomposition.
268 std::shared_ptr<ScalarProduct> scalarProduct_;
// Number of pre-/post-smoothing steps (clamped to at most 1, see ctors).
272 std::size_t preSteps_;
274 std::size_t postSteps_;
// True if this object built (and owns) the hierarchy itself.
276 bool buildHierarchy_;
// Set to false when the coarse solver reports non-convergence (checked in mgc).
278 bool coarsesolverconverged;
// Smoother used on the coarsest level when no direct solver is available.
280 typedef std::shared_ptr<Smoother> SmootherPointer;
281 SmootherPointer coarseSmoother_;
// Verbosity/debug level controlling the diagnostic output below.
283 std::size_t verbosity_;
// Copy constructor (declaration line elided in this view): shares the
// hierarchy, solvers and settings with `amg`, but deliberately leaves the
// per-level work vectors (rhs_, lhs_, residual_) empty — they are
// (re)created in pre().
286 template<
class M,
class X,
class PI,
class A>
288 : matrices_(amg.matrices_), solver_(amg.solver_),
289 rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
290 gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
292 coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
// Constructor taking an already-built operator hierarchy plus a coarse-level
// solver (signature line elided in this view). Does not own the hierarchy
// (buildHierarchy_=false); work vectors are created later in pre().
295 template<
class M,
class X,
class PI,
class A>
298 : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
299 rhs_(), lhs_(), residual_(), scalarProduct_(),
300 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
301 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
302 symmetric(symmetric_), coarsesolverconverged(true),
303 coarseSmoother_(), verbosity_(parms.debugLevel())
// Only a single smoothing step is implemented; clamp larger requests.
305 if(preSteps_>1||postSteps_>1)
307 std::cerr<<
"WARNING only one step of smoothing is supported!"<<std::endl;
// FIX: clamp to one step (was `=0`, which would disable smoothing entirely
// and contradicts both the warning text and the sibling constructor below,
// which clamps to 1).
308 preSteps_=postSteps_=1;
// The supplied hierarchy must already be built.
310 assert(matrices_->isBuilt());
311 static_assert(std::is_same<PI,SequentialInformation>::value,
312 "Currently only sequential runs are supported");
// Constructor taking a fine-level matrix and a coarsening criterion
// (signature line elided in this view): builds the hierarchy itself
// (buildHierarchy_=true) via createHierarchies().
314 template<
class M,
class X,
class PI,
class A>
321 : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
322 preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
323 buildHierarchy_(true),
324 symmetric(symmetric_), coarsesolverconverged(true),
325 coarseSmoother_(), verbosity_(criterion.debugLevel())
// Only a single smoothing step is implemented; clamp larger requests to 1.
327 if(preSteps_>1||postSteps_>1)
329 std::cerr<<
"WARNING only one step of smoothing is supported!"<<std::endl;
330 preSteps_=postSteps_=1;
332 static_assert(std::is_same<PI,SequentialInformation>::value,
333 "Currently only sequential runs are supported");
// Wrap the stack-allocated matrix in a non-owning shared_ptr and build the
// multigrid hierarchy from it.
337 auto matrixptr = stackobject_to_shared_ptr(matrix);
338 createHierarchies(criterion, matrixptr, pinfo);
// createHierarchies (signature partially elided): builds the operator
// hierarchy from the fine matrix and sets up the coarsest-level solver.
341 template<
class M,
class X,
class PI,
class A>
344 const std::shared_ptr<const Operator>& matrixptr,
// Construct the hierarchy object; const_casts adapt the const inputs to the
// non-const interface of OperatorHierarchy (legacy API).
348 matrices_ = std::make_shared<OperatorHierarchy>(
349 std::const_pointer_cast<Operator>(matrixptr),
350 stackobject_to_shared_ptr(
const_cast<PI&
>(pinfo)));
// Coarsen according to the criterion; NegateSet selects the complement of
// the owner set. NOTE(review): confirm semantics against the build() docs.
352 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
// Timing/diagnostic output on rank 0 only.
354 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
355 std::cout<<
"Building Hierarchy of "<<matrices_->maxlevels()<<
" levels took "<<watch.elapsed()<<
" seconds."<<std::endl;
// Set up the coarse solver only if we built the hierarchy ourselves and
// coarsening reached the requested coarsest level.
357 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
// Configure the coarse smoother arguments (one iteration).
361 sargs.iterations = 1;
364 cargs.setArgs(sargs);
// Use the redistributed matrix/communication if data was redistributed for
// the coarsest level, otherwise the plain coarsest-level data.
365 if(matrices_->redistributeInformation().back().isSetup()) {
367 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
368 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
370 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
371 cargs.setComm(*matrices_->parallelInformation().coarsest());
375 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
// Prefer a sparse direct solver on the coarsest level when available.
377 #if HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
378 #if HAVE_SUITESPARSE_UMFPACK
379 #define DIRECTSOLVER UMFPack
381 #define DIRECTSOLVER SuperLU
// Direct solver is only usable sequentially, or when redistribution has
// concentrated the coarsest matrix on a single process.
384 if(std::is_same<ParallelInformation,SequentialInformation>::value
385 || matrices_->parallelInformation().coarsest()->communicator().size()==1
386 || (matrices_->parallelInformation().coarsest().isRedistributed()
387 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
388 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) {
389 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
390 std::cout<<
"Using superlu"<<std::endl;
391 if(matrices_->parallelInformation().coarsest().isRedistributed())
// Only processes that actually hold rows instantiate the solver.
393 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
395 solver_.reset(
new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(),
false,
false));
399 solver_.reset(
new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(),
false,
false));
402 #endif // HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
// Fallback: iterative BiCGSTAB preconditioned with the coarse smoother.
404 if(matrices_->parallelInformation().coarsest().isRedistributed())
406 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
408 solver_.reset(
new BiCGSTABSolver<X>(
const_cast<M&
>(matrices_->matrices().coarsest().getRedistributed()),
410 *coarseSmoother_, 1E-2, 1000, 0));
414 solver_.reset(
new BiCGSTABSolver<X>(
const_cast<M&
>(*matrices_->matrices().coarsest()),
416 *coarseSmoother_, 1E-2, 1000, 0));
420 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
421 std::cout<<
"Building Hierarchy of "<<matrices_->maxlevels()<<
" levels took "<<watch.elapsed()<<
" seconds."<<std::endl;
// pre() fragment (signature elided): prepares the solver before iterating —
// pre-solves Dirichlet rows on the finest level and allocates the per-level
// rhs/lhs/residual hierarchies.
425 template<
class M,
class X,
class PI,
class A>
433 typedef typename M::matrix_type
Matrix;
440 const Matrix&
mat=matrices_->matrices().finest()->getmat();
// Scan every row of the finest matrix: a row whose only nonzero entry is a
// nonzero diagonal is treated as a Dirichlet row and solved directly.
441 for(RowIter row=
mat.begin(); row!=
mat.end(); ++row) {
442 bool isDirichlet =
true;
443 bool hasDiagonal =
false;
445 for(ColIter
col=row->begin();
col!=row->end(); ++
col) {
446 if(row.index()==
col.index()) {
448 hasDiagonal = (*
col != zero);
// Apply the local diagonal solve for detected Dirichlet rows.
454 if(isDirichlet && hasDiagonal)
455 diag->solve(x[row.index()], b[row.index()]);
458 std::cout<<
" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
// Make the initial guess consistent across processes.
461 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
// Create the per-level work vectors from copies of b and x, then coarsen
// them so each hierarchy level has correctly sized vectors.
462 rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
463 lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
464 residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
465 matrices_->coarsenVector(*rhs_);
466 matrices_->coarsenVector(*lhs_);
467 matrices_->coarsenVector(*residual_);
// levels() (signature line elided): number of levels actually built in the
// operator hierarchy.
474 template<
class M,
class X,
class PI,
class A>
477 return matrices_->levels();
// maxlevels() (signature line elided): maximum number of levels across all
// processes in the hierarchy.
479 template<
class M,
class X,
class PI,
class A>
482 return matrices_->maxlevels();
// apply() fragment (signature elided): performs one preconditioner
// application by running the multigrid cycle from the finest level.
486 template<
class M,
class X,
class PI,
class A>
489 LevelContext levelContext;
491 initIteratorsWithFineLevel(levelContext);
// The update vector must come in zero-initialized.
493 assert(v.two_norm()==0);
// Single-level hierarchy: cycle directly on (v, b); otherwise on (v, d).
496 if(matrices_->maxlevels()==1){
499 mgc(levelContext, v, b);
501 mgc(levelContext, v, d);
// Make the result consistent when no post-smoothing re-established it.
502 if(postSteps_==0||matrices_->maxlevels()==1)
503 levelContext.pinfo->copyOwnerToAll(v, v);
// initIteratorsWithFineLevel() (signature elided): points every iterator of
// the level context at the finest level of the respective hierarchy and
// resets the level counter to 0.
506 template<
class M,
class X,
class PI,
class A>
509 levelContext.matrix = matrices_->matrices().finest();
510 levelContext.pinfo = matrices_->parallelInformation().finest();
511 levelContext.redist =
512 matrices_->redistributeInformation().begin();
513 levelContext.aggregates = matrices_->aggregatesMaps().begin();
514 levelContext.lhs = lhs_->finest();
515 levelContext.residual = residual_->finest();
516 levelContext.rhs = rhs_->finest();
517 levelContext.level=0;
// moveToCoarseLevel(): restricts the residual to the next coarser level and
// advances all level-context iterators. Returns false when, after
// redistribution, this process holds no coarse data to work on.
520 template<
class M,
class X,
class PI,
class A>
521 bool FastAMG<M,X,PI,A>
522 ::moveToCoarseLevel(LevelContext& levelContext)
524 bool processNextLevel=
true;
// If data is redistributed on this level, restrict from the redistributed
// residual; an empty redistributed vector means nothing to process here.
526 if(levelContext.redist->isSetup()) {
528 levelContext.redist->redistribute(
static_cast<const Range&
>(*levelContext.residual),
529 levelContext.residual.getRedistributed());
530 processNextLevel = levelContext.residual.getRedistributed().size()>0;
531 if(processNextLevel) {
533 ++levelContext.pinfo;
536 static_cast<const Range&
>(levelContext.residual.getRedistributed()),
537 *levelContext.pinfo);
// No redistribution: restrict the residual directly.
542 ++levelContext.pinfo;
545 static_cast<const Range&
>(*levelContext.residual), *levelContext.pinfo);
// Advance the remaining iterators to the coarser level.
548 if(processNextLevel) {
550 ++levelContext.residual;
552 ++levelContext.matrix;
553 ++levelContext.level;
554 ++levelContext.redist;
// The aggregates map list is one shorter than the matrix hierarchy, so only
// advance it while not on the coarsest level.
556 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
558 ++levelContext.aggregates;
// Start the coarse-level correction from zero.
562 *levelContext.residual=0;
564 return processNextLevel;
// moveToFineLevel(): steps all level-context iterators back to the finer
// level and prolongates the coarse correction (with damping) onto x.
567 template<
class M,
class X,
class PI,
class A>
568 void FastAMG<M,X,PI,A>
569 ::moveToFineLevel(LevelContext& levelContext,
bool processNextLevel, Domain& x)
571 if(processNextLevel) {
// Mirror of the aggregates-iterator advance in moveToCoarseLevel().
572 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
574 --levelContext.aggregates;
576 --levelContext.redist;
577 --levelContext.level;
579 --levelContext.matrix;
580 --levelContext.residual;
// Prolongate using the redistributed data when redistribution is set up on
// this level; damping factor comes from the hierarchy.
585 if(levelContext.redist->isSetup()) {
590 levelContext.lhs.getRedistributed(),
591 matrices_->getProlongationDampingFactor(),
592 *levelContext.pinfo, *levelContext.redist);
596 matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
602 if(processNextLevel) {
// presmooth(): one pre-smoothing step on the current level; updates x and
// the level residual (smoother invocation partially elided in this view).
609 template<
class M,
class X,
class PI,
class A>
610 void FastAMG<M,X,PI,A>
611 ::presmooth(LevelContext& levelContext, Domain& x,
const Range& b)
615 *levelContext.residual,
// postsmooth(): one post-smoothing step on the current level, applied via
// the level's matrix, iterate x, residual and right-hand side b.
619 template<
class M,
class X,
class PI,
class A>
620 void FastAMG<M,X,PI,A>
621 ::postsmooth(LevelContext& levelContext, Domain& x,
const Range& b)
624 ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
// (Template header of an elided member definition — body not visible here.)
628 template<
class M,
class X,
class PI,
class A>
// mgc(): the recursive multigrid cycle. On the coarsest level it invokes the
// coarse solver (with redistribution handling); otherwise it restricts,
// recurses gamma_ times, and prolongates the correction back.
634 template<
class M,
class X,
class PI,
class A>
637 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
// Coarsest level with redistributed data: solve on the processes that hold
// rows, then scatter the solution back and make it consistent.
641 if(levelContext.redist->isSetup()) {
642 levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
643 if(levelContext.rhs.getRedistributed().size()>0) {
645 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
646 levelContext.rhs.getRedistributed());
647 solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
649 levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
650 levelContext.pinfo->copyOwnerToAll(v, v);
// Coarsest level without redistribution: solve in place. The const_cast
// adapts b to the non-const solver interface (legacy API).
652 levelContext.pinfo->copyOwnerToAll(b, b);
653 solver_->apply(v,
const_cast<Range&
>(b), res);
// Record coarse-solver failure; checked collectively on the finest level.
659 coarsesolverconverged =
false;
// Not on the coarsest level: restrict, recurse, prolongate.
665 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
666 bool processNextLevel = moveToCoarseLevel(levelContext);
668 if(processNextLevel) {
// gamma_=1 gives a V-cycle, gamma_=2 a W-cycle.
670 for(std::size_t i=0; i<gamma_; i++)
671 mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
674 moveToFineLevel(levelContext, processNextLevel, v);
// On return to the finest level, agree globally on coarse-solver success.
679 if(levelContext.matrix == matrices_->matrices().finest()) {
680 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
681 if(!coarsesolverconverged)
682 DUNE_THROW(MathError,
"Coarse solver did not converge");
// post() fragment (signature elided): cleanup after the solve; the iterate
// parameter is intentionally unused (body releases the work hierarchies —
// elided in this view).
691 template<
class M,
class X,
class PI,
class A>
694 DUNE_UNUSED_PARAMETER(x);
// Accessor fragment (signature elided): fills `cont` with the coarsest-level
// aggregate numbers projected onto the finest level, as computed by the
// operator hierarchy.
700 template<
class M,
class X,
class PI,
class A>
704 matrices_->getCoarsestAggregatesOnFinest(cont);