Athena++/Atmosphere
Planetary Atmosphere Simulator
main.cpp
Go to the documentation of this file.
1 //========================================================================================
2 // Athena++ astrophysical MHD code
3 // Copyright(C) 2014 James M. Stone <jmstone@princeton.edu> and other code contributors
4 // Licensed under the 3-clause BSD License, see LICENSE file for details
5 //========================================================================================
6 //! \file main.cpp
7 //! \brief Athena++ main program
9 //
10 // Based on the Athena MHD code (Cambridge version), originally written in 2002-2005 by
11 // Jim Stone, Tom Gardiner, and Peter Teuben, with many important contributions by many
12 // other developers after that, i.e. 2005-2014.
13 //
14 // Athena++ was started in Jan 2014. The core design was finished during 4-7/2014 at the
15 // KITP by Jim Stone. GR was implemented by Chris White and AMR by Kengo Tomida during
16 // 2014-2016. Contributions from many others have continued to the present.
17 //========================================================================================
18 
// C headers

// C++ headers
#include <cmath>      // sqrt()
#include <csignal>    // ISO C/C++ signal() and sigset_t, sigemptyset() POSIX C extensions
#include <cstddef>    // offsetof()
#include <cstdint>    // int64_t
#include <cstdio>     // sscanf()
#include <cstdlib>    // strtol
#include <ctime>      // clock(), CLOCKS_PER_SEC, clock_t
#include <exception>  // exception
#include <iomanip>    // setprecision()
#include <iostream>   // cout, endl
#include <limits>     // max_digits10
#include <new>        // bad_alloc
#include <string>     // string

// Athena++ headers
#include "athena.hpp"
#include "fft/turbulence.hpp"
#include "globals.hpp"
#include "gravity/fft_gravity.hpp"
#include "gravity/mg_gravity.hpp"
#include "mesh/mesh.hpp"
#include "outputs/io_wrapper.hpp"
#include "outputs/outputs.hpp"
#include "parameter_input.hpp"
#include "particles/material_point.hpp"
#include "task_list/task_list.hpp"
#include "utils/utils.hpp"
48 
49 // MPI/OpenMP headers
50 #ifdef MPI_PARALLEL
51 #include <mpi.h>
52  MPI_Datatype MPI_PARTICLE;
53 #endif
54 
55 #ifdef OPENMP_PARALLEL
56 #include <omp.h>
57 #endif
58 
59 //----------------------------------------------------------------------------------------
60 //! \fn int main(int argc, char *argv[])
61 //  \brief Athena++ main program
63 int main(int argc, char *argv[]) {
64  std::string athena_version = "version 19.0 - August 2019";
65  char *input_filename = nullptr, *restart_filename = nullptr;
66  char *prundir = nullptr;
67  int res_flag = 0; // set to 1 if -r argument is on cmdline
68  int narg_flag = 0; // set to 1 if -n argument is on cmdline
69  int iarg_flag = 0; // set to 1 if -i <file> argument is on cmdline
70  int mesh_flag = 0; // set to <nproc> if -m <nproc> argument is on cmdline
71  int wtlim = 0;
72  std::uint64_t mbcnt = 0;
73 
74  //--- Step 1. --------------------------------------------------------------------------
75  // Initialize MPI environment, if necessary
76 
77 #ifdef MPI_PARALLEL
78 
79 #ifdef OPENMP_PARALLEL
80  int mpiprv;
81  if (MPI_SUCCESS != MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &mpiprv)) {
82  std::cout << "### FATAL ERROR in main" << std::endl
83  << "MPI Initialization failed." << std::endl;
84  return(0);
85  }
86  if (mpiprv != MPI_THREAD_MULTIPLE) {
87  std::cout << "### FATAL ERROR in main" << std::endl
88  << "MPI_THREAD_MULTIPLE must be supported for the hybrid parallelzation. "
89  << MPI_THREAD_MULTIPLE << " : " << mpiprv
90  << std::endl;
91  MPI_Finalize();
92  return(0);
93  }
94 #else // no OpenMP
95  if (MPI_SUCCESS != MPI_Init(&argc, &argv)) {
96  std::cout << "### FATAL ERROR in main" << std::endl
97  << "MPI Initialization failed." << std::endl;
98  return(0);
99  }
100 #endif // OPENMP_PARALLEL
101  // Get process id (rank) in MPI_COMM_WORLD
102  if (MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD, &(Globals::my_rank))) {
103  std::cout << "### FATAL ERROR in main" << std::endl
104  << "MPI_Comm_rank failed." << std::endl;
105  MPI_Finalize();
106  return(0);
107  }
108 
109  // Get total number of MPI processes (ranks)
110  if (MPI_SUCCESS != MPI_Comm_size(MPI_COMM_WORLD, &Globals::nranks)) {
111  std::cout << "### FATAL ERROR in main" << std::endl
112  << "MPI_Comm_size failed." << std::endl;
113  MPI_Finalize();
114  return(0);
115  }
116 
117  // Get maximum value of MPI tag
118  MPI_Aint* tag_ub_ptr;
119  int att_flag;
120  MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &tag_ub_ptr, &att_flag);
121  Globals::mpi_tag_ub = *tag_ub_ptr;
122 
123  // commit MPI_PARTICLE
124  int counts[3] = {1, 2+NINT_PARTICLE_DATA, 8+NREAL_PARTICLE_DATA};
125  MPI_Datatype types[3] = {MPI_AINT, MPI_INT, MPI_ATHENA_REAL};
126  MPI_Aint disps[3] = {offsetof(MaterialPoint, next), offsetof(MaterialPoint, id),
127  offsetof(MaterialPoint, time)};
128 
129  MPI_Type_create_struct(3, counts, disps, types, &MPI_PARTICLE);
130  MPI_Type_commit(&MPI_PARTICLE);
131 #else // no MPI
132  Globals::my_rank = 0;
133  Globals::nranks = 1;
135 #endif // MPI_PARALLEL
136 
137  //--- Step 2. --------------------------------------------------------------------------
138  // Check for command line options and respond.
139 
140  for (int i=1; i<argc; i++) {
141  // If argv[i] is a 2 character string of the form "-?" then:
142  if (*argv[i] == '-' && *(argv[i]+1) != '\0' && *(argv[i]+2) == '\0') {
143  // check validity of command line options + arguments:
144  char opt_letter = *(argv[i]+1);
145  switch(opt_letter) {
146  // options that do not take arguments:
147  case 'n':
148  case 'c':
149  case 'h':
150  break;
151  // options that require arguments:
152  default:
153  if ((i+1 >= argc) // flag is at the end of the command line options
154  || (*argv[i+1] == '-') ) { // flag is followed by another flag
155  if (Globals::my_rank == 0) {
156  std::cout << "### FATAL ERROR in main" << std::endl
157  << "-" << opt_letter << " must be followed by a valid argument\n";
158 #ifdef MPI_PARALLEL
159  MPI_Finalize();
160 #endif
161  return(0);
162  }
163  }
164  }
165  switch(*(argv[i]+1)) {
166  case 'i': // -i <input_filename>
167  input_filename = argv[++i];
168  iarg_flag = 1;
169  break;
170  case 'r': // -r <restart_file>
171  res_flag = 1;
172  restart_filename = argv[++i];
173  break;
174  case 'd': // -d <run_directory>
175  prundir = argv[++i];
176  break;
177  case 'n':
178  narg_flag = 1;
179  break;
180  case 'm': // -m <nproc>
181  mesh_flag = static_cast<int>(std::strtol(argv[++i], nullptr, 10));
182  break;
183  case 't': // -t <hh:mm:ss>
184  int wth, wtm, wts;
185  std::sscanf(argv[++i], "%d:%d:%d", &wth, &wtm, &wts);
186  wtlim = wth*3600 + wtm*60 + wts;
187  break;
188  case 'c':
189  if (Globals::my_rank == 0) ShowConfig();
190 #ifdef MPI_PARALLEL
191  MPI_Finalize();
192 #endif
193  return(0);
194  break;
195  case 'h':
196  default:
197  if (Globals::my_rank == 0) {
198  std::cout << "Athena++ " << athena_version << std::endl;
199  std::cout << "Usage: " << argv[0] << " [options] [block/par=value ...]\n";
200  std::cout << "Options:" << std::endl;
201  std::cout << " -i <file> specify input file [athinput]\n";
202  std::cout << " -r <file> restart with this file\n";
203  std::cout << " -d <directory> specify run dir [current dir]\n";
204  std::cout << " -n parse input file and quit\n";
205  std::cout << " -c show configuration and quit\n";
206  std::cout << " -m <nproc> output mesh structure and quit\n";
207  std::cout << " -t hh:mm:ss wall time limit for final output\n";
208  std::cout << " -h this help\n";
209  ShowConfig();
210  }
211 #ifdef MPI_PARALLEL
212  MPI_Finalize();
213 #endif
214  return(0);
215  break;
216  }
217  } // else if argv[i] not of form "-?" ignore it here (tested in ModifyFromCmdline)
218  }
219 
220  if (restart_filename == nullptr && input_filename == nullptr) {
221  // no input file is given
222  std::cout << "### FATAL ERROR in main" << std::endl
223  << "No input file or restart file is specified." << std::endl;
224 #ifdef MPI_PARALLEL
225  MPI_Finalize();
226 #endif
227  return(0);
228  }
229 
230  // Set up the signal handler
232  if (Globals::my_rank == 0 && wtlim > 0)
234 
235  // Note steps 3-6 are protected by a simple error handler
236  //--- Step 3. --------------------------------------------------------------------------
237  // Construct object to store input parameters, then parse input file and command line.
238  // With MPI, the input is read by every process in parallel using MPI-IO.
239 
240  ParameterInput *pinput;
241  IOWrapper infile, restartfile;
242 #ifdef ENABLE_EXCEPTIONS
243  try {
244 #endif
245  pinput = new ParameterInput;
246  if (res_flag == 1) {
247  restartfile.Open(restart_filename, IOWrapper::FileMode::read);
248  pinput->LoadFromFile(restartfile);
249  // If both -r and -i are specified, make sure next_time gets corrected.
250  // This needs to be corrected on the restart file because we need the old dt.
251  if (iarg_flag == 1) pinput->RollbackNextTime();
252  // leave the restart file open for later use
253  }
254  if (iarg_flag == 1) {
255  // if both -r and -i are specified, override the parameters using the input file
256  infile.Open(input_filename, IOWrapper::FileMode::read);
257  pinput->LoadFromFile(infile);
258  infile.Close();
259  }
260  pinput->ModifyFromCmdline(argc ,argv);
261 #ifdef ENABLE_EXCEPTIONS
262  }
263  catch(std::bad_alloc& ba) {
264  std::cout << "### FATAL ERROR in main" << std::endl
265  << "memory allocation failed initializing class ParameterInput: "
266  << ba.what() << std::endl;
267  if (res_flag == 1) restartfile.Close();
268 #ifdef MPI_PARALLEL
269  MPI_Finalize();
270 #endif
271  return(0);
272  }
273  catch(std::exception const& ex) {
274  std::cout << ex.what() << std::endl; // prints diagnostic message
275  if (res_flag == 1) restartfile.Close();
276 #ifdef MPI_PARALLEL
277  MPI_Finalize();
278 #endif
279  return(0);
280  }
281 #endif // ENABLE_EXCEPTIONS
282 
283  //--- Step 4. --------------------------------------------------------------------------
284  // Construct and initialize Mesh
285 
286  Mesh *pmesh;
287 #ifdef ENABLE_EXCEPTIONS
288  try {
289 #endif
290  if (res_flag == 0) {
291  pmesh = new Mesh(pinput, mesh_flag);
292  } else {
293  pmesh = new Mesh(pinput, restartfile, mesh_flag);
294  }
295 #ifdef ENABLE_EXCEPTIONS
296  }
297  catch(std::bad_alloc& ba) {
298  std::cout << "### FATAL ERROR in main" << std::endl
299  << "memory allocation failed initializing class Mesh: "
300  << ba.what() << std::endl;
301  if (res_flag == 1) restartfile.Close();
302 #ifdef MPI_PARALLEL
303  MPI_Finalize();
304 #endif
305  return(0);
306  }
307  catch(std::exception const& ex) {
308  std::cout << ex.what() << std::endl; // prints diagnostic message
309  if (res_flag == 1) restartfile.Close();
310 #ifdef MPI_PARALLEL
311  MPI_Finalize();
312 #endif
313  return(0);
314  }
315 #endif // ENABLE_EXCEPTIONS
316 
317  // With current mesh time possibly read from restart file, correct next_time for outputs
318  if (iarg_flag == 1 && res_flag == 1) {
319  // if both -r and -i are specified, ensure that next_time >= mesh_time - dt
320  pinput->ForwardNextTime(pmesh->time);
321  }
322 
323  // Dump input parameters and quit if code was run with -n option.
324  if (narg_flag) {
325  if (Globals::my_rank == 0) pinput->ParameterDump(std::cout);
326  if (res_flag == 1) restartfile.Close();
327 #ifdef MPI_PARALLEL
328  MPI_Finalize();
329 #endif
330  return(0);
331  }
332 
333  if (res_flag == 1) restartfile.Close(); // close the restart file here
334 
335  // Quit if -m was on cmdline. This option builds and outputs mesh structure.
336  if (mesh_flag > 0) {
337 #ifdef MPI_PARALLEL
338  MPI_Finalize();
339 #endif
340  return(0);
341  }
342 
343  //--- Step 5. --------------------------------------------------------------------------
344  // Construct and initialize TaskList
345 
346  //TimeIntegratorTaskList *ptlist;
347  TaskList *ptlist;
348 #ifdef ENABLE_EXCEPTIONS
349  try {
350 #endif
351  ptlist = new MAIN_TASKLIST(pinput, pmesh);
352  //ptlist = new TimeIntegratorTaskList(pinput, pmesh);
353 #ifdef ENABLE_EXCEPTIONS
354  }
355  catch(std::bad_alloc& ba) {
356  std::cout << "### FATAL ERROR in main" << std::endl << "memory allocation failed "
357  << "in creating task list " << ba.what() << std::endl;
358 #ifdef MPI_PARALLEL
359  MPI_Finalize();
360 #endif
361  return(0);
362  }
363 #endif // ENABLE_EXCEPTIONS
364 
365  SuperTimeStepTaskList *pststlist = nullptr;
366 #if STS_ENABLED != 0
367  //if (STS_ENABLED) {
368 #ifdef ENABLE_EXCEPTIONS
369  try {
370 #endif
371  pststlist = new SuperTimeStepTaskList(pinput, pmesh, ptlist);
372 #ifdef ENABLE_EXCEPTIONS
373  }
374  catch(std::bad_alloc& ba) {
375  std::cout << "### FATAL ERROR in main" << std::endl << "memory allocation failed "
376  << "in creating task list " << ba.what() << std::endl;
377 #ifdef MPI_PARALLEL
378  MPI_Finalize();
379 #endif
380  return(0);
381  }
382 #endif // ENABLE_EXCEPTIONS
383 #endif
384 
385  //--- Step 6. --------------------------------------------------------------------------
386  // Set initial conditions by calling problem generator, or reading restart file
387 
388 #ifdef ENABLE_EXCEPTIONS
389  try {
390 #endif
391  pmesh->Initialize(res_flag, pinput);
392 #ifdef ENABLE_EXCEPTIONS
393  }
394  catch(std::bad_alloc& ba) {
395  std::cout << "### FATAL ERROR in main" << std::endl << "memory allocation failed "
396  << "in problem generator " << ba.what() << std::endl;
397 #ifdef MPI_PARALLEL
398  MPI_Finalize();
399 #endif
400  return(0);
401  }
402  catch(std::exception const& ex) {
403  std::cout << ex.what() << std::endl; // prints diagnostic message
404 #ifdef MPI_PARALLEL
405  MPI_Finalize();
406 #endif
407  return(0);
408  }
409 #endif // ENABLE_EXCEPTIONS
410 
411  //--- Step 7. --------------------------------------------------------------------------
412  // Change to run directory, initialize outputs object, and make output of ICs
413 
414  Outputs *pouts;
415 #ifdef ENABLE_EXCEPTIONS
416  try {
417 #endif
418  ChangeRunDir(prundir);
419  pouts = new Outputs(pmesh, pinput);
420  if (res_flag == 0) pouts->MakeOutputs(pmesh, pinput);
421 #ifdef ENABLE_EXCEPTIONS
422  }
423  catch(std::bad_alloc& ba) {
424  std::cout << "### FATAL ERROR in main" << std::endl
425  << "memory allocation failed setting initial conditions: "
426  << ba.what() << std::endl;
427 #ifdef MPI_PARALLEL
428  MPI_Finalize();
429 #endif
430  return(0);
431  }
432  catch(std::exception const& ex) {
433  std::cout << ex.what() << std::endl; // prints diagnostic message
434 #ifdef MPI_PARALLEL
435  MPI_Finalize();
436 #endif
437  return(0);
438  }
439 #endif // ENABLE_EXCEPTIONS
440 
441  //=== Step 8. === START OF MAIN INTEGRATION LOOP =======================================
442  // For performance, there is no error handler protecting this step (except outputs)
443 
444  if (Globals::my_rank == 0) {
445  std::cout << "\nSetup complete, entering main loop...\n" << std::endl;
446  }
447 
448  clock_t tstart = clock();
449 #ifdef OPENMP_PARALLEL
450  double omp_start_time = omp_get_wtime();
451 #endif
452 
453  while ((pmesh->time < pmesh->tlim) &&
454  (pmesh->nlim < 0 || pmesh->ncycle < pmesh->nlim)) {
455  if (Globals::my_rank == 0)
456  pmesh->OutputCycleDiagnostics();
457 
458  if (STS_ENABLED) {
459  // compute nstages for this STS
460  Real my_dt = pmesh->dt;
461  Real dt_parabolic = pmesh->dt_parabolic;
462  pststlist->nstages =
463  static_cast<int>(0.5*(-1. + std::sqrt(1. + 8.*my_dt/dt_parabolic))) + 1;
464 
465  // take super-timestep
466  for (int stage=1; stage<=pststlist->nstages; ++stage)
467  pststlist->DoTaskListOneStage(pmesh,stage);
468  }
469 
470  if (pmesh->turb_flag > 1) pmesh->ptrbd->Driving(); // driven turbulence
471 
472  for (int stage=1; stage<=ptlist->nstages; ++stage) {
473  if (SELF_GRAVITY_ENABLED == 1) // fft (flag 0 for discrete kernel, 1 for continuous)
474  pmesh->pfgrd->Solve(stage, 0);
475  else if (SELF_GRAVITY_ENABLED == 2) // multigrid
476  pmesh->pmgrd->Solve(stage);
477  ptlist->DoTaskListOneStage(pmesh, stage);
478  }
479 
480  pmesh->UserWorkInLoop();
481 
482  pmesh->ncycle++;
483  pmesh->time += pmesh->dt;
484  mbcnt += pmesh->nbtotal;
485  pmesh->step_since_lb++;
486 
488 
489  pmesh->NewTimeStep();
490 #ifdef ENABLE_EXCEPTIONS
491  try {
492 #endif
493  if (pmesh->time < pmesh->tlim) // skip the final output as it happens later
494  pouts->MakeOutputs(pmesh,pinput);
495 #ifdef ENABLE_EXCEPTIONS
496  }
497  catch(std::bad_alloc& ba) {
498  std::cout << "### FATAL ERROR in main" << std::endl
499  << "memory allocation failed during output: " << ba.what() <<std::endl;
500 #ifdef MPI_PARALLEL
501  MPI_Finalize();
502 #endif
503  return(0);
504  }
505  catch(std::exception const& ex) {
506  std::cout << ex.what() << std::endl; // prints diagnostic message
507 #ifdef MPI_PARALLEL
508  MPI_Finalize();
509 #endif
510  return(0);
511  }
512 #endif // ENABLE_EXCEPTIONS
513 
514  // check for signals
515  if (SignalHandler::CheckSignalFlags() != 0) {
516  break;
517  }
518  } // END OF MAIN INTEGRATION LOOP ======================================================
519  // Make final outputs, print diagnostics, clean up and terminate
520 
521  if (Globals::my_rank == 0 && wtlim > 0)
523 
524  //--- Step 9. --------------------------------------------------------------------------
525  // Make the final outputs
526 #ifdef ENABLE_EXCEPTIONS
527  try {
528 #endif
529  pouts->MakeOutputs(pmesh,pinput,true);
530 #ifdef ENABLE_EXCEPTIONS
531  }
532  catch(std::bad_alloc& ba) {
533  std::cout << "### FATAL ERROR in main" << std::endl
534  << "memory allocation failed during output: " << ba.what() <<std::endl;
535 #ifdef MPI_PARALLEL
536  MPI_Finalize();
537 #endif
538  return(0);
539  }
540  catch(std::exception const& ex) {
541  std::cout << ex.what() << std::endl; // prints diagnostic message
542 #ifdef MPI_PARALLEL
543  MPI_Finalize();
544 #endif
545  return(0);
546  }
547 #endif // ENABLE_EXCEPTIONS
548 
549  pmesh->UserWorkAfterLoop(pinput);
550 
551  //--- Step 10. -------------------------------------------------------------------------
552  // Print diagnostic messages related to the end of the simulation
553  if (Globals::my_rank == 0) {
554  pmesh->OutputCycleDiagnostics();
555  if (SignalHandler::GetSignalFlag(SIGTERM) != 0) {
556  std::cout << std::endl << "Terminating on Terminate signal" << std::endl;
557  } else if (SignalHandler::GetSignalFlag(SIGINT) != 0) {
558  std::cout << std::endl << "Terminating on Interrupt signal" << std::endl;
559  } else if (SignalHandler::GetSignalFlag(SIGALRM) != 0) {
560  std::cout << std::endl << "Terminating on wall-time limit" << std::endl;
561  } else if (pmesh->ncycle == pmesh->nlim) {
562  std::cout << std::endl << "Terminating on cycle limit" << std::endl;
563  } else {
564  std::cout << std::endl << "Terminating on time limit" << std::endl;
565  }
566 
567  std::cout << "time=" << pmesh->time << " cycle=" << pmesh->ncycle << std::endl;
568  std::cout << "tlim=" << pmesh->tlim << " nlim=" << pmesh->nlim << std::endl;
569 
570  if (pmesh->adaptive) {
571  std::cout << std::endl << "Number of MeshBlocks = " << pmesh->nbtotal
572  << "; " << pmesh->nbnew << " created, " << pmesh->nbdel
573  << " destroyed during this simulation." << std::endl;
574  }
575 
576  // Calculate and print the zone-cycles/cpu-second and wall-second
577 #ifdef OPENMP_PARALLEL
578  double omp_time = omp_get_wtime() - omp_start_time;
579 #endif
580  clock_t tstop = clock();
581  double cpu_time = (tstop>tstart ? static_cast<double> (tstop-tstart) :
582  1.0)/static_cast<double> (CLOCKS_PER_SEC);
583  std::uint64_t zonecycles =
584  mbcnt*static_cast<std::uint64_t> (pmesh->pblock->GetNumberOfMeshBlockCells());
585  double zc_cpus = static_cast<double> (zonecycles) / cpu_time;
586 
587  std::cout << std::endl << "zone-cycles = " << zonecycles << std::endl;
588  std::cout << "cpu time used = " << cpu_time << std::endl;
589  std::cout << "zone-cycles/cpu_second = " << zc_cpus << std::endl;
590 #ifdef OPENMP_PARALLEL
591  double zc_omps = static_cast<double> (zonecycles) / omp_time;
592  std::cout << std::endl << "omp wtime used = " << omp_time << std::endl;
593  std::cout << "zone-cycles/omp_wsecond = " << zc_omps << std::endl;
594 #endif
595  }
596 
597  delete pinput;
598  delete pmesh;
599  delete ptlist;
600  delete pouts;
601 
602 #ifdef MPI_PARALLEL
603  MPI_Type_free(&MPI_PARTICLE);
604  MPI_Finalize();
605 #endif
606 
607  return(0);
608 }
double Real
Definition: athena.hpp:29
int GetNumberOfMeshBlockCells()
Definition: mesh.hpp:149
Definition: mesh.hpp:203
void OutputCycleDiagnostics()
Definition: mesh.cpp:1855
int nlim
Definition: mesh.hpp:249
FFTGravityDriver * pfgrd
Definition: mesh.hpp:262
Real dt_parabolic
Definition: mesh.hpp:248
Real dt
Definition: mesh.hpp:248
int nbdel
Definition: mesh.hpp:251
const bool adaptive
Definition: mesh.hpp:246
int nbtotal
Definition: mesh.hpp:251
Real tlim
Definition: mesh.hpp:248
void NewTimeStep()
Definition: mesh.cpp:1065
TurbulenceDriver * ptrbd
Definition: mesh.hpp:261
void UserWorkInLoop()
void Initialize(int res_flag, ParameterInput *pin)
Definition: mesh.cpp:1327
int step_since_lb
Definition: mesh.hpp:253
MGGravityDriver * pmgrd
Definition: mesh.hpp:263
Real time
Definition: mesh.hpp:248
void LoadBalancingAndAdaptiveMeshRefinement(ParameterInput *pin)
MeshBlock * pblock
Definition: mesh.hpp:259
int nbnew
Definition: mesh.hpp:251
int ncycle
Definition: mesh.hpp:249
int turb_flag
Definition: mesh.hpp:255
void UserWorkAfterLoop(ParameterInput *pin)
void MakeOutputs(Mesh *pm, ParameterInput *pin, bool wtflag=false)
Definition: outputs.cpp:921
void ParameterDump(std::ostream &os)
void ModifyFromCmdline(int argc, char *argv[])
void ParameterInput::ModifyFromCmdline(int argc, char *argv[])
void LoadFromFile(IOWrapper &input)
void ForwardNextTime(Real time)
void DoTaskListOneStage(Mesh *pmesh, int stage)
Definition: task_list.cpp:67
int nstages
Definition: task_list.hpp:94
int main(int argc, char *argv[])
Definition: main.cpp:63
int nranks
Definition: globals.cpp:24
int mpi_tag_ub
Definition: globals.cpp:25
int my_rank
Definition: globals.cpp:23
int GetSignalFlag(int s)
void SetWallTimeAlarm(int t)
void SignalHandlerInit()
int CheckSignalFlags()
void CancelWallTimeAlarm()
void ChangeRunDir(const char *pdir)
void ShowConfig()