Zoltan2
TaskMappingProblemTest.cpp
Go to the documentation of this file.
1 
5 
6 #include <string>
7 
8 #include <Teuchos_RCP.hpp>
9 #include <Teuchos_Array.hpp>
10 #include <Teuchos_GlobalMPISession.hpp>
11 #include <Teuchos_ParameterList.hpp>
12 #include "Teuchos_XMLParameterListHelpers.hpp"
13 
14 #include "Tpetra_MultiVector.hpp"
15 #include <Tpetra_CrsGraph.hpp>
16 #include <Tpetra_Map.hpp>
17 
20 #include <Zoltan2_TimerManager.hpp>
25 
// Distributed CRS graph holding the task-communication structure.
typedef Tpetra::CrsGraph<zlno_t, zgno_t, znode_t> mytest_tcrsGraph_t;
// Multivector carrying the 3-D center coordinates of each task.
typedef Tpetra::MultiVector<zscalar_t, zlno_t, zgno_t, znode_t> mytest_tMVector_t;
// Node type taken from the default Tpetra::Map instantiation.
typedef Tpetra::Map<>::node_type mytest_znode_t;
// Map describing how tasks (graph rows) are distributed across ranks.
typedef Tpetra::Map<zlno_t, zgno_t, mytest_znode_t> mytest_map_t;
32 
37 };
38 
42 };
43 
44 RCP<mytest_tcrsGraph_t> create_tpetra_input_matrix(int nx, int ny, int nz, int numProcs, Teuchos::RCP<const Teuchos::Comm<int> > tcomm,
45  RCP<Zoltan2::Environment> env, zscalar_t ** &partCenters, zgno_t & myTasks){
46 
47  int rank = tcomm->getRank();
48  using namespace Teuchos;
49 
50  int coordDim = 3;
51  zgno_t numGlobalTasks = nx*ny*nz;
52 
53  //int rank = tcomm->getRank();
54  myTasks = numGlobalTasks / numProcs;
55  zgno_t taskLeftOver = numGlobalTasks % numProcs;
56  if (zgno_t(rank) < taskLeftOver ) ++myTasks;
57 
58  zgno_t myTaskBegin = (numGlobalTasks / numProcs) * rank;
59  myTaskBegin += (taskLeftOver < zgno_t(rank) ? taskLeftOver : rank);
60  zgno_t myTaskEnd = myTaskBegin + myTasks;
61 
62  //zscalar_t **partCenters = NULL;
63  partCenters = new zscalar_t * [coordDim];
64  for(int i = 0; i < coordDim; ++i){
65  partCenters[i] = new zscalar_t[myTasks];
66  }
67 
68 
69  zgno_t *task_gnos = new zgno_t [myTasks];
70  zgno_t *task_communication_xadj_ = new zgno_t [myTasks+1];
71  zgno_t *task_communication_adj_ = new zgno_t [myTasks * 6];
72 
73  env->timerStart(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");
74  zlno_t prevNCount = 0;
75  task_communication_xadj_[0] = 0;
76  for (zgno_t i = myTaskBegin; i < myTaskEnd; ++i) {
77  task_gnos[i - myTaskBegin] = i;
78 
79  int x = i % nx;
80  int y = (i / (nx)) % ny;
81  int z = (i / (nx)) / ny;
82  partCenters[0][i - myTaskBegin] = x;
83  partCenters[1][i - myTaskBegin] = y;
84  partCenters[2][i - myTaskBegin] = z;
85 
86  if (x > 0){
87  task_communication_adj_[prevNCount++] = i - 1;
88  }
89  if (x < nx - 1){
90  task_communication_adj_[prevNCount++] = i + 1;
91  }
92  if (y > 0){
93  task_communication_adj_[prevNCount++] = i - nx;
94  }
95  if (y < ny - 1){
96  task_communication_adj_[prevNCount++] = i + nx;
97  }
98  if (z > 0){
99  task_communication_adj_[prevNCount++] = i - nx * ny;
100  }
101  if (z < nz - 1){
102  task_communication_adj_[prevNCount++] = i + nx * ny;
103  }
104  task_communication_xadj_[i+1 - myTaskBegin] = prevNCount;
105  }
106 
107  env->timerStop(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");
108 
109  using namespace Teuchos;
110  RCP<const mytest_map_t> map = rcp (new mytest_map_t (numGlobalTasks, myTasks, 0, tcomm));
111 
112  RCP<mytest_tcrsGraph_t> TpetraCrsGraph(new mytest_tcrsGraph_t (map, 0));
113 
114  env->timerStart(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");
115 
116  for (zgno_t lclRow = 0; lclRow < myTasks; ++lclRow) {
117  const zgno_t gblRow = map->getGlobalElement (lclRow);
118  zgno_t begin = task_communication_xadj_[lclRow];
119  zgno_t end = task_communication_xadj_[lclRow + 1];
120  const ArrayView< const zgno_t > indices(task_communication_adj_+begin, end-begin);
121  TpetraCrsGraph->insertGlobalIndices(gblRow, indices);
122  }
123  TpetraCrsGraph->fillComplete ();
124 
125  env->timerStop(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");
126  return TpetraCrsGraph;
127 }
128 
129 
130 RCP <Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > create_multi_vector_adapter(
131  RCP<const mytest_map_t> map,
132  zscalar_t ** partCenters, zgno_t myTasks){
133 
134  const int coord_dim = 3;
135  Teuchos::Array<Teuchos::ArrayView<const zscalar_t> > coordView(coord_dim);
136 
137  if(myTasks > 0){
138  Teuchos::ArrayView<const zscalar_t> a(partCenters[0], myTasks);
139  coordView[0] = a;
140  Teuchos::ArrayView<const zscalar_t> b(partCenters[1], myTasks);
141  coordView[1] = b;
142  Teuchos::ArrayView<const zscalar_t> c(partCenters[2], myTasks);
143  coordView[2] = c;
144  }
145  else {
146  Teuchos::ArrayView<const zscalar_t> a;
147  coordView[0] = a;
148  coordView[1] = a;
149  coordView[2] = a;
150  }
151  RCP<mytest_tMVector_t> coords(new mytest_tMVector_t(map, coordView.view(0, coord_dim), coord_dim));//= set multivector;
152  RCP<const mytest_tMVector_t> const_coords = rcp_const_cast<const mytest_tMVector_t>(coords);
153  RCP <Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > adapter (new Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t>(const_coords));
154  return adapter;
155 }
156 
157 
/// \brief Exercise task mapping on a DISTRIBUTED input adapter, three ways:
/// (1) partition with MJ first, then map the partitioning solution;
/// (2) map directly, letting the mapper treat each rank's elements as a part;
/// (3) map a hand-built solution where every element is its own part
///     (so the mapping also performs the partitioning).
/// Prints metrics for all three cases on rank 0.
void test_distributed_input_adapter(int nx, int ny, int nz, Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm){
  Teuchos::RCP<const Teuchos::Comm<int> > tcomm = global_tcomm;//Teuchos::createSerialComm<int>();
  mytest_part_t numProcs = tcomm->getSize();
  Teuchos::ParameterList distributed_problemParams;
  //create mapping problem parameters
  distributed_problemParams.set("Machine_Optimization_Level", 10);
  distributed_problemParams.set("mapping_algorithm", "geometric");
  distributed_problemParams.set("distributed_input_adapter", true);
  distributed_problemParams.set("mj_enable_rcb", true);
  distributed_problemParams.set("algorithm", "multijagged");
  distributed_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env (new Zoltan2::Environment(distributed_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(new Zoltan2::TimerManager(global_tcomm, &std::cout, Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  // NOTE(review): partCenters is allocated by create_tpetra_input_matrix and
  // never freed in this test — a (bounded) leak tolerated by the test driver.
  zscalar_t **partCenters;
  zgno_t myTasks ;
  //create tpetra input graph
  RCP<mytest_tcrsGraph_t> distributed_tpetra_graph = create_tpetra_input_matrix(nx, ny, nz, numProcs, tcomm, env, partCenters, myTasks);
  RCP<const mytest_map_t> distributed_map = distributed_tpetra_graph->getMap();
  global_tcomm->barrier();

  //create input adapter from tpetra graph
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph = rcp_const_cast<const mytest_tcrsGraph_t>(distributed_tpetra_graph);
  RCP<mytest_adapter_t> ia (new mytest_adapter_t(const_tpetra_graph));

  //create multivector for coordinates and attach it to the graph adapter
  RCP <Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > distributed_adapter = create_multi_vector_adapter(distributed_map, partCenters, myTasks);
  ia->setCoordinateInput(distributed_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();

  //NOW we have 3 ways to create mapping problem.
  //First, run a partitioning algorithm on the input adapter. Then run task mapping at the result of this partitioning.
  //Second, run mapping algorithm directly. Mapping algorithm will assume that the tasks within the same processors
  //are in the same partition, such as they are migrated as a result of a partitioning.
  //Third, you can create your own partitioning solution without running a partitioning algorithm. This option can be used
  //to make the task mapping to perform partitioning as well. That is, create a partitioning solution where each element
  //is a part itself, then task mapping algorithm will map each of these tasks to a processor. As a result of this mapping,
  //it will perform partitioning as well.

  //FIRST CREATE A PARTITIONING PROBLEM and solve it with MultiJagged.
  Zoltan2::PartitioningProblem<mytest_adapter_t> partitioning_problem (ia.getRawPtr(), &distributed_problemParams, global_tcomm);
  partitioning_problem.solve();
  Zoltan2::PartitioningSolution<mytest_adapter_t> partition_solution = partitioning_problem.getSolution();

  //FOR THE SECOND CASE WE DONT NEED a solution.

  //FOR the third case we create our own solution and set unique parts to each element.
  //Basically each element has its global id as part number.
  Zoltan2::PartitioningSolution<mytest_adapter_t> single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView< const zgno_t> gids = distributed_map->getNodeElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i){
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);


  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  //create mapping problem for the first case, provide the partition solution by MJ.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_1(ia.getRawPtr(), &distributed_problemParams, global_tcomm, &partition_solution);

  //create mapping problem for the second case. We don't provide a solution in this case.
  //Mapping assumes that the elements in the current processor is attached together and are in the same part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_2(ia.getRawPtr(), &distributed_problemParams, global_tcomm);

  //create a mapping problem for the third case. We provide a solution in which all elements belong to unique part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_3(ia.getRawPtr(), &distributed_problemParams, global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");
  //solve mapping problem.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");
  distributed_map_problem_1.solve(true);
  distributed_map_problem_2.solve(true);
  distributed_map_problem_3.solve(true);
  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  //get the solutions (owned by the problems; raw pointers are only borrowed).
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln1 = distributed_map_problem_1.getSolution();
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln2 = distributed_map_problem_2.getSolution();
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 = distributed_map_problem_3.getSolution();

  timer->printAndResetToZero();

  // quality_t is a typedef declared earlier in this file (collapsed in this
  // view) — presumably a Zoltan2 evaluate-mapping/partition class; each metric
  // object scores one mapping solution against the machine model.
  RCP<quality_t> metricObject_1 = rcp(new quality_t(ia.getRawPtr(),&distributed_problemParams,global_tcomm,msoln1, distributed_map_problem_1.getMachine().getRawPtr()));
  RCP<quality_t> metricObject_2 = rcp(new quality_t(ia.getRawPtr(),&distributed_problemParams,global_tcomm,msoln2, distributed_map_problem_2.getMachine().getRawPtr()));
  RCP<quality_t> metricObject_3 = rcp(new quality_t(ia.getRawPtr(),&distributed_problemParams,global_tcomm,msoln3, distributed_map_problem_3.getMachine().getRawPtr()));

  if (global_tcomm->getRank() == 0){
    std::cout << "METRICS FOR THE FIRST CASE - TWO PHASE MAPPING" << std::endl;
    metricObject_1->printMetrics(cout);
    std::cout << "METRICS FOR THE SECOND CASE - TWO PHASE MAPPING - INITIAL ASSIGNMENT ARE ASSUMED TO BE A PART" << std::endl;
    metricObject_2->printMetrics(cout);
    std::cout << "METRICS FOR THE THIRD CASE - ONE PHASE MAPPING - EACH ELEMENT IS ASSUMED TO BE IN UNIQUE PART AT THE BEGINNING" << std::endl;
    metricObject_3->printMetrics(cout);
  }
}
269 
270 
271 
/// \brief Exercise task mapping when EVERY rank holds the whole input
/// (serial/replicated input adapter). Only the "each element is its own part"
/// single-phase mapping makes sense here; metrics are printed on rank 0.
void test_serial_input_adapter(int nx, int ny, int nz, Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm){
  //all processors have the all input in this case.
  Teuchos::RCP<const Teuchos::Comm<int> > serial_comm = Teuchos::createSerialComm<int>();

  //for the input creation, let each processor think that it is the only processor.
  mytest_part_t numProcs = serial_comm->getSize();
  Teuchos::ParameterList serial_problemParams;
  //create mapping problem parameters
  serial_problemParams.set("Machine_Optimization_Level", 10);
  serial_problemParams.set("mapping_algorithm", "geometric");
  serial_problemParams.set("distributed_input_adapter", false);
  serial_problemParams.set("algorithm", "multijagged");
  serial_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env (new Zoltan2::Environment(serial_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(new Zoltan2::TimerManager(global_tcomm, &std::cout, Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  // NOTE(review): partCenters is allocated by create_tpetra_input_matrix and
  // never freed in this test — a (bounded) leak tolerated by the test driver.
  zscalar_t **partCenters;
  zgno_t myTasks ;
  //create tpetra input graph (on the serial comm, so each rank builds it all)
  RCP<mytest_tcrsGraph_t> serial_tpetra_graph = create_tpetra_input_matrix(nx, ny, nz, numProcs, serial_comm, env, partCenters, myTasks);
  RCP<const mytest_map_t> serial_map = serial_tpetra_graph->getMap();
  global_tcomm->barrier();

  //create input adapter from tpetra graph
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph = rcp_const_cast<const mytest_tcrsGraph_t>(serial_tpetra_graph);
  RCP<mytest_adapter_t> ia (new mytest_adapter_t(const_tpetra_graph));

  //create multivector for coordinates and attach it to the graph adapter
  RCP <Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > serial_adapter = create_multi_vector_adapter(serial_map, partCenters, myTasks);
  ia->setCoordinateInput(serial_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();


  //NOW, it only makes sense to map them serially. This is a case for the applications,
  //where they already have the whole graph in all processes, and they want to do the mapping.
  //Basically it will save time by running only the mapping algorithm, if that is the case.

  //First case from the distributed test does not really make sense and it is erroneous.
  //Zoltan partitioning algorithms require distributed input. One still can do that in two phases,
  //but needs to make sure that distributed and serial input adapters match correctly.

  //Second case does not make sense and is erroneous. All elements are within the same node and they should not be
  //assumed to be in the same part, since it will result in only a single part.

  //If input adapter is not distributed, we are only interested in the third case.
  //Each element will be its own unique part at the beginning of the mapping.

  //FOR the third case we create our own solution and set unique parts to each element.
  //Basically each element has its global id as part number.
  //The global ids are the same as local ids here because each processor owns the whole thing.
  Zoltan2::PartitioningSolution<mytest_adapter_t> single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView< const zgno_t> gids = serial_map->getNodeElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i){
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);


  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  //create a mapping problem for the third case. We provide a solution in which all elements belong to unique part.
  //even though the input is not distributed, we still provide the global_tcomm because processors will calculate different mappings
  //and the best one will be chosen.
  Zoltan2::MappingProblem<mytest_adapter_t> serial_map_problem(ia.getRawPtr(), &serial_problemParams, global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");
  //solve mapping problem.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");
  serial_map_problem.solve(true);
  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  //get the solution (owned by the problem; the raw pointer is only borrowed).
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 = serial_map_problem.getSolution();

  timer->printAndResetToZero();

  // quality_t is a typedef declared earlier in this file (collapsed in this
  // view) — presumably a Zoltan2 evaluate-mapping/partition class.
  //input is not distributed in this case.
  //metric object should be given the serial comm so that it can calculate the correct metrics without global communication.
  RCP<quality_t> metricObject_3 = rcp(new quality_t(ia.getRawPtr(),&serial_problemParams,serial_comm,msoln3, serial_map_problem.getMachine().getRawPtr()));

  if (global_tcomm->getRank() == 0){
    std::cout << "METRICS FOR THE SERIAL CASE - ONE PHASE MAPPING - EACH ELEMENT IS ASSUMED TO BE IN UNIQUE PART AT THE BEGINNING" << std::endl;
    metricObject_3->printMetrics(cout);
  }
}
368 
369 int main(int argc, char *argv[]){
370 
371 
372  Teuchos::GlobalMPISession session(&argc, &argv);
373  Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm = Teuchos::DefaultComm<int>::getComm();
374 
375  int nx = 16, ny = 16, nz = 16;
376  for ( int i = 1 ; i < argc ; ++i ) {
377  if ( 0 == strcasecmp( argv[i] , "NX" ) ) {
378  nx = atoi( argv[++i] );
379  }
380  else if ( 0 == strcasecmp( argv[i] , "NY" ) ) {
381  ny = atoi( argv[++i] );
382  }
383  else if ( 0 == strcasecmp( argv[i] , "NZ" ) ) {
384  nz = atoi( argv[++i] );
385  }
386  else{
387  std::cerr << "Unrecognized command line argument #" << i << ": " << argv[i] << std::endl ;
388  return 1;
389  }
390  }
391 
392 
393  try{
394 
395 
396  Teuchos::RCP<const Teuchos::Comm<int> > serial_comm = Teuchos::createSerialComm<int>();
397  test_distributed_input_adapter(nx, ny, nz, global_tcomm);
398  test_serial_input_adapter(nx, ny, nz, global_tcomm);
399 
400 #if 0
401  {
402  part_t my_parts = 0, *my_result_parts;
403  //const part_t *local_element_to_rank = msoln1->getPartListView();
404 
405  std::cout << "me:" << global_tcomm->getRank() << " my_parts:" << my_parts << " myTasks:" << myTasks << std::endl;
406  if (global_tcomm->getRank() == 0) {
407 
408  //zscalar_t **dots = partCenters;
409  //int i = 0, j =0;
410  FILE *f2 = fopen("plot.gnuplot", "w");
411  for (int i = 0; i< global_tcomm->getSize(); ++i){
412  char str[20];
413  sprintf(str, "coords%d.txt", i);
414  if (i == 0){
415  fprintf(f2,"splot \"%s\"\n", str);
416  }
417  else {
418  fprintf(f2,"replot \"%s\"\n", str);
419  }
420  }
421  fprintf(f2,"pause-1\n");
422  fclose(f2);
423  }
424  char str[20];
425  int myrank = global_tcomm->getRank();
426  sprintf(str, "coords%d.txt", myrank);
427  FILE *coord_files = fopen(str, "w");
428 
429 
430  for (int j = 0; j < my_parts; ++j){
431  int findex = my_result_parts[j];
432  std::cout << "findex " << findex << std::endl;
433  fprintf(coord_files,"%lf %lf %lf\n", partCenters[0][findex], partCenters[1][findex], partCenters[2][findex]);
434  }
435  fclose(coord_files);
436  }
437 #endif
438 
439  if (global_tcomm->getRank() == 0){
440  cout << "PASS" << endl;
441  }
442  }
443  catch(std::string &s){
444  cerr << s << endl;
445  }
446 
447  catch(char * s){
448  cerr << s << endl;
449  }
450 }
451 
RCP< Zoltan2::XpetraMultiVectorAdapter< mytest_tMVector_t > > create_multi_vector_adapter(RCP< const mytest_map_t > map, zscalar_t **partCenters, zgno_t myTasks)
Time an algorithm (or other entity) as a whole.
Zoltan2::XpetraCrsGraphAdapter< mytest_tcrsGraph_t, mytest_tMVector_t > mytest_adapter_t
MappingInputDistribution
Tpetra::CrsGraph< zlno_t, zgno_t, znode_t > mytest_tcrsGraph_t
PartitionMapping maps a solution or an input distribution to ranks.
void solve(bool updateInputData=true)
Direct the problem to create a solution.
double zscalar_t
mytest_adapter_t::part_t mytest_part_t
Provides access for Zoltan2 to Xpetra::CrsGraph data.
int zlno_t
common code used by tests
Tpetra::Map ::node_type mytest_znode_t
void test_distributed_input_adapter(int nx, int ny, int nz, Teuchos::RCP< const Teuchos::Comm< int > > global_tcomm)
Tpetra::Map< zlno_t, zgno_t, mytest_znode_t > mytest_map_t
Defines the XpetraMultiVectorAdapter.
Defines XpetraCrsGraphAdapter class.
SparseMatrixAdapter_t::part_t part_t
void test_serial_input_adapter(int nx, int ny, int nz, Teuchos::RCP< const Teuchos::Comm< int > > global_tcomm)
A PartitioningSolution is a solution to a partitioning problem.
Defines the EvaluatePartition class.
int main(int argc, char *argv[])
InputTraits< User >::part_t part_t
The user parameters, debug, timing and memory profiling output objects, and error checking methods...
An adapter for Xpetra::MultiVector.
void setParts(ArrayRCP< part_t > &partList)
The algorithm uses setParts to set the solution.
int zgno_t
Tpetra::MultiVector< zscalar_t, zlno_t, zgno_t, znode_t > mytest_tMVector_t
Defines the MappingSolution class.
PartitioningProblem sets up partitioning problems for the user.
RCP< mytest_tcrsGraph_t > create_tpetra_input_matrix(int nx, int ny, int nz, int numProcs, Teuchos::RCP< const Teuchos::Comm< int > > tcomm, RCP< Zoltan2::Environment > env, zscalar_t **&partCenters, zgno_t &myTasks)
Defines the PartitioningProblem class.
MappingProblem enables mapping of a partition (either computed or input) to MPI ranks.
A class that computes and returns quality metrics.
void solve(bool updateInputData=true)
Direct the problem to create a solution.
Declarations for TimerManager.
Defines the MappingProblem class.