Antares Xpansion
Investment simulations for Antares studies
BendersMPI.h
#pragma once

#include "antares-xpansion/benders/benders_core/BendersBase.h"
#include "antares-xpansion/benders/benders_core/SubproblemCut.h"
#include "antares-xpansion/benders/benders_core/SubproblemWorker.h"
#include "antares-xpansion/benders/benders_core/Worker.h"
#include "antares-xpansion/helpers/Timer.h"
#include "antares-xpansion/xpansion_interfaces/ILogger.h"
#include "common_mpi.h"

/*!
 * \brief Class used to run the Benders algorithm in parallel.
 */
class BendersMpi : public BendersBase
{
 public:
  ~BendersMpi() override = default;
  BendersMpi(const BendersBaseOptions& options,
             std::shared_ptr<ILogger> logger,
             std::shared_ptr<Output::OutputWriter> writer,
             mpi::communicator& world,
             std::shared_ptr<MathLoggerDriver> mathLoggerDriver);

  void launch() override;

  std::string BendersName() const override
  {
    return "Benders mpi";
  }

  const int rank_0 = 0;

 protected:
  void free() override;
  void Run() override;
  void InitializeProblems() override;
  void BroadcastXCut();
  void master_build_cuts(const std::vector<SubProblemDataMap>& gathered_subproblem_map);
  void SetSubproblemDataCostAndSimplexIter(
      const std::vector<SubProblemDataMap>& gathered_subproblem_map);

  mpi::communicator& _world;

 private:
  void step_1_solve_master();
  void step_2_solve_subproblems_and_build_cuts();
  void step_4_update_best_solution(int rank);

  SubProblemDataMap get_subproblem_cut_package();

  void solve_master_and_create_trace();

  void do_solve_master_create_trace_and_update_cuts();

  virtual void gather_subproblems_cut_package_and_build_cuts(
      const SubProblemDataMap& subproblem_data_map,
      const Timer& process_timer);

  void write_exception_message(const std::exception& ex) const;

  void check_if_some_proc_had_a_failure(int success);

 protected:
  [[nodiscard]] bool shouldParallelize() const final
  {
    return false;
  }

  void PreRunInitialization();

  int Rank() const
  {
    return _world.rank();
  }

  template<typename T>
  void BroadCast(T& value, int root) const
  {
    mpi::broadcast(_world, value, root);
  }

  template<typename T>
  void BroadCast(T* values, int n, int root) const
  {
    mpi::broadcast(_world, values, n, root);
  }

  template<typename T>
  void Gather(const T& value, std::vector<T>& vector_of_values, int root) const
  {
    mpi::gather(_world, value, vector_of_values, root);
  }

  void BuildMasterProblem();

  int WorldSize() const
  {
    return _world.size();
  }

  void Barrier() const
  {
    _world.barrier();
  }

  template<typename T, typename Op>
  void Reduce(const T& in_value, T& out_value, Op op, int root) const
  {
    mpi::reduce(_world, in_value, out_value, op, root);
  }

  template<typename T, typename Op>
  void AllReduce(const T& in_value, T& out_value, Op op) const
  {
    mpi::all_reduce(_world, in_value, out_value, op);
  }

  virtual void GatherCuts(const SubProblemDataMap& subproblem_data_map, const Timer& walltime);
  void BroadCastVariablesIndices();
  virtual void ComputeSubproblemsContributionToCriteria(
      const SubProblemDataMap& subproblem_data_map);
  void SolveSubproblem(PlainData::SubProblemData& subproblem_data,
                       const std::string& name,
                       const std::shared_ptr<SubproblemWorker>& worker) override;
  void UpdateMaxCriterionArea();
};
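The protected BroadCast, Gather, Reduce, AllReduce and Barrier members are thin wrappers around the corresponding Boost.MPI free functions applied to _world. As a point of reference only (the standalone program below is not part of Antares Xpansion, and its data is invented for illustration), this sketch exercises the same primitives: a broadcast of a raw array from rank 0, a reduce of per-rank values onto the root, an all_reduce so every rank sees the same aggregate, and a final barrier.

// Standalone Boost.MPI sketch (illustrative only, not project code): the free
// functions wrapped by BroadCast/Reduce/AllReduce/Barrier above.
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/collectives.hpp>
#include <functional>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char** argv) {
  mpi::environment env(argc, argv);
  mpi::communicator world;
  const int rank_0 = 0;

  // Array overload, cf. BroadCast(T* values, int n, int root):
  // rank 0 fills a small buffer and every rank receives the same contents.
  double buffer[3] = {0.0, 0.0, 0.0};
  if (world.rank() == rank_0) { buffer[0] = 1.5; buffer[1] = 2.5; buffer[2] = 3.5; }
  mpi::broadcast(world, buffer, 3, rank_0);

  // Each rank produces a local scalar (stand-in for a subproblem cost).
  double local_cost = buffer[0] * (world.rank() + 1);

  // Reduce a sum onto the root only, cf. Reduce(in_value, out_value, op, root).
  double total_on_root = 0.0;
  mpi::reduce(world, local_cost, total_on_root, std::plus<double>(), rank_0);

  // All ranks obtain the same aggregate, cf. AllReduce(in_value, out_value, op),
  // e.g. to agree on a stopping criterion.
  double total_everywhere = 0.0;
  mpi::all_reduce(world, local_cost, total_everywhere, std::plus<double>());

  world.barrier();  // cf. Barrier()
  if (world.rank() == rank_0)
    std::cout << "root sum = " << total_on_root
              << ", shared sum = " << total_everywhere << std::endl;
  return 0;
}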
BendersMpi: class used to run the Benders algorithm in parallel. Definition: BendersMPI.h:16 (base class BendersBase, defined in BendersBase.h:38).
InitializeProblems() override: method to load each problem in a thread. Definition: BendersMPI.cpp:29.
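The entry above only says that InitializeProblems() loads the problems; how they are split across MPI processes is decided in BendersMPI.cpp:29 and is not visible from this header. Purely as an assumed illustration (the helper name, the problem list and the round-robin rule below are hypothetical, not taken from the source), one common way to give each rank its own share is:

// Hypothetical round-robin assignment of subproblems to ranks (illustration only).
#include <boost/mpi/communicator.hpp>
#include <string>
#include <vector>

std::vector<std::string> LocalShare(const std::vector<std::string>& all_problems,
                                    const boost::mpi::communicator& world) {
  std::vector<std::string> mine;
  for (std::size_t i = 0; i < all_problems.size(); ++i) {
    // Keep problem i on this process if i mod world.size() == world.rank().
    if (static_cast<int>(i % world.size()) == world.rank()) mine.push_back(all_problems[i]);
  }
  return mine;
}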
Run() override: runs the Benders algorithm in parallel. Definition: BendersMPI.cpp:390.
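Taken together, step_1_solve_master(), BroadcastXCut(), step_2_solve_subproblems_and_build_cuts() and gather_subproblems_cut_package_and_build_cuts() suggest a master/worker iteration: the master rank solves the master problem, the candidate is broadcast, every rank solves its share of subproblems, and the cut data is gathered back on the master. The standalone skeleton below is only a hedged sketch of that communication pattern with toy numbers; the real cut construction, stopping test and data structures live in BendersMPI.cpp.

// Illustrative master/worker loop in the spirit of Run(); toy data, not the actual implementation.
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char** argv) {
  mpi::environment env(argc, argv);
  mpi::communicator world;
  const int rank_0 = 0;

  double candidate = 10.0;  // stand-in for the master's investment candidate
  int stop = 0;

  for (int it = 0; it < 50 && !stop; ++it) {
    // step 1: only the master rank "solves" the master problem (toy update here).
    if (world.rank() == rank_0) candidate *= 0.5;

    // The new candidate is broadcast to every rank (cf. BroadcastXCut()).
    mpi::broadcast(world, candidate, rank_0);

    // step 2: every rank solves its local subproblems and produces cut data (toy value here).
    double local_cut_value = candidate * candidate / (world.rank() + 1);

    // The per-rank cut packages are gathered on the master
    // (cf. get_subproblem_cut_package() and gather_subproblems_cut_package_and_build_cuts()).
    std::vector<double> gathered;
    mpi::gather(world, local_cut_value, gathered, rank_0);

    // The master aggregates the cuts and takes the stopping decision...
    if (world.rank() == rank_0) {
      double total = 0.0;
      for (double v : gathered) total += v;
      stop = (total < 1e-3) ? 1 : 0;  // toy stopping test
      std::cout << "iteration " << it << ": aggregated cut value " << total << std::endl;
    }
    // ...which is then shared so that all ranks leave the loop together.
    mpi::broadcast(world, stop, rank_0);
  }
  return 0;
}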
free() override: method to free the memory used by each problem. Definition: BendersMPI.cpp:371.