HΦ  3.1.0
CheckMPI.c File Reference

Compute total number of electrons, spins. More...

#include "Common.h"
#include "wrapperMPI.h"

Go to the source code of this file.

Functions

int CheckMPI (struct BindStruct *X)
 Define the number of sites in each PE (DefineList.Nsite). Reduce the number of electrons (DefineList.Ne), total Sz (DefineList.Total2Sz) by them in the inter process region. More...
 
void CheckMPI_Summary (struct BindStruct *X)
 Print information of MPI parallelization and modify DefineList::Tpow in the inter-process region. More...
 

Detailed Description

Compute total number of electrons, spins.

Definition in file CheckMPI.c.

Function Documentation

◆ CheckMPI()

int CheckMPI ( struct BindStruct X)

Define the number of sites in each PE (DefineList.Nsite). Reduce the number of electrons (DefineList.Ne), total Sz (DefineList.Total2Sz) by them in the inter process region.

Author
Mitsuaki Kawamura (The University of Tokyo)

Branch for each model

Check the number of processes for Boost

Parameters
[in,out]X

Definition at line 27 of file CheckMPI.c.

References cErrNProcNumber, cErrNProcNumberGneralSpin, cErrNProcNumberHubbard, cErrNProcNumberSet, cErrNProcNumberSpin, exitMPI(), FALSE, ITINERANT, myrank, nproc, stdoutMPI, TRUE, and X.

Referenced by check().

28 {
29  int isite, NDimInterPE, SmallDim, SpinNum, ipivot, ishift, isiteMax, isiteMax0;
30 
35  X->Def.NsiteMPI = X->Def.Nsite;
36  X->Def.Total2SzMPI = X->Def.Total2Sz;
37  switch (X->Def.iCalcModel) {
38  case HubbardGC: /****************************************************/
39  case Hubbard:
40  case HubbardNConserved:
41  case Kondo:
42  case KondoGC:
43 
49  NDimInterPE = 1;
50  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
51  if (NDimInterPE == nproc) {
52  X->Def.Nsite = isite;
53  break;
54  } /*if (NDimInterPE == nproc)*/
55  NDimInterPE *= 4;
56  } /*for (isite = NsiteMPI; isite > 0; isite--)*/
57 
58  if (isite == 0) {
59  fprintf(stdoutMPI, "%s", cErrNProcNumberHubbard);
60  fprintf(stdoutMPI, cErrNProcNumber, nproc);
61  NDimInterPE = 1;
62  int ismallNproc=1;
63  int ilargeNproc=1;
64  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
65  if (NDimInterPE > nproc) {
66  ilargeNproc = NDimInterPE;
67  if(isite >1)
68  ismallNproc = NDimInterPE/4;
69  break;
70  }/*if (NDimInterPE > nproc)*/
71  NDimInterPE *= 4;
72  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
73  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
74  return FALSE;
75  //return FALSE;
76  } /*if (isite == 0)*/
77 
78  switch (X->Def.iCalcModel) /*2 (inner)*/ {
79 
80  case Hubbard:
86  SmallDim = myrank;
87  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
88  SpinNum = SmallDim % 4;
89  SmallDim /= 4;
90  if (SpinNum == 1 /*01*/) {
91  X->Def.Nup -= 1;
92  X->Def.Ne -= 1;
93  }
94  else if (SpinNum == 2 /*10*/) {
95  X->Def.Ndown -= 1;
96  X->Def.Ne -= 1;
97  }
98  else if (SpinNum == 3 /*11*/){
99  X->Def.Nup -= 1;
100  X->Def.Ndown -= 1;
101  X->Def.Ne -= 2;
102  }
103  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
104 
105  break;/*case Hubbard:*/
106 
107  case HubbardNConserved:
112  SmallDim = myrank;
113  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
114  SpinNum = SmallDim % 4;
115  SmallDim /= 4;
116  if (SpinNum == 1 /*01*/ || SpinNum == 2 /*10*/) X->Def.Ne -= 1;
117  else if (SpinNum == 3 /*11*/) X->Def.Ne -= 2;
118  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
119 
120  break; /*case HubbardNConserved:*/
121 
122  case KondoGC:
123  case Kondo:
129  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)
130  if (X->Def.LocSpn[isite] != ITINERANT) X->Def.NLocSpn -= 1;
131 
132  if (X->Def.iCalcModel == Kondo) {
133  SmallDim = myrank;
134  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
135  SpinNum = SmallDim % 4;
136  SmallDim /= 4;
137  if (X->Def.LocSpn[isite] == ITINERANT) {
138  if (SpinNum == 1 /*01*/) {
139  X->Def.Nup -= 1;
140  X->Def.Ne -= 1;
141  }
142  else if (SpinNum == 2 /*10*/) {
143  X->Def.Ndown -= 1;
144  X->Def.Ne -= 1;
145  }
146  else if (SpinNum == 3 /*11*/) {
147  X->Def.Nup -= 1;
148  X->Def.Ndown -= 1;
149  X->Def.Ne -= 2;
150  }
151  }
152  else {
153  fprintf(stdoutMPI, "\n Stop because local spin in the inter process region\n");
154  return FALSE;
155  }
156  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
157  } /*if (X->Def.iCalcModel == Kondo)*/
158 
159  break; /*case KondoGC, Kondo*/
160 
161  } /*switch (X->Def.iCalcModel) 2(inner)*/
162 
163  break; /*case HubbardGC, Hubbard, HubbardNConserved, Kondo, KondoGC:*/
165  case SpinGC:/********************************************************/
166  case Spin:
167 
168  if (X->Def.iFlgGeneralSpin == FALSE) {
173  NDimInterPE = 1;
174  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
175  if (NDimInterPE == nproc) {
176  X->Def.Nsite = isite;
177  break;
178  }/*if (NDimInterPE == nproc)*/
179  NDimInterPE *= 2;
180  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
181 
182  if (isite == 0) {
183  fprintf(stdoutMPI, "%s", cErrNProcNumberSpin);
184  fprintf(stdoutMPI, cErrNProcNumber, nproc);
185  NDimInterPE = 1;
186  int ismallNproc=1;
187  int ilargeNproc=1;
188  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
189  if (NDimInterPE > nproc) {
190  ilargeNproc = NDimInterPE;
191  if(isite >1)
192  ismallNproc = NDimInterPE/2;
193  break;
194  }/*if (NDimInterPE > nproc)*/
195  NDimInterPE *= 2;
196  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
197  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
198  return FALSE;
199  }/*if (isite == 0)*/
200 
201  if (X->Def.iCalcModel == Spin) {
202  /*X->Def.NeMPI = X->Def.Ne;*/
203 
204  /* Ne should be different in each PE */
205  SmallDim = myrank;
206  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
207  SpinNum = SmallDim % 2;
208  SmallDim /= 2;
209  if (SpinNum == 0) {
210  X->Def.Ndown -= 1;
211  }
212  else {
213  X->Def.Ne -= 1;
214  X->Def.Nup -= 1;
215  }
216  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
217  }/*if (X->Def.iCalcModel == Spin)*/
218 
219  } /*if (X->Def.iFlgGeneralSpin == FALSE)*/
220  else{/* General Spin */
225  NDimInterPE = 1;
226  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
227  if (NDimInterPE == nproc) {
228  X->Def.Nsite = isite;
229  break;
230  }/*if (NDimInterPE == nproc)*/
231  NDimInterPE *= X->Def.SiteToBit[isite - 1];
232  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
233 
234  if (isite == 0) {
235  fprintf(stdoutMPI, "%s", cErrNProcNumberGneralSpin);
236  fprintf(stdoutMPI, cErrNProcNumber, nproc);
237  NDimInterPE = 1;
238  int ismallNproc=1;
239  int ilargeNproc=1;
240  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
241  if (NDimInterPE > nproc) {
242  ilargeNproc = NDimInterPE;
243  if(isite >1)
244  ismallNproc = NDimInterPE/X->Def.SiteToBit[isite - 2];
245  break;
246  }/*if (NDimInterPE > nproc)*/
247  NDimInterPE *= X->Def.SiteToBit[isite - 1];
248  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
249  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
250  return FALSE;
251  }/*if (isite == 0)*/
252 
253  if (X->Def.iCalcModel == Spin) {
254  X->Def.Total2SzMPI = X->Def.Total2Sz;
255 
256  /* Ne should be different in each PE */
257  SmallDim = myrank;
258  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
259  SpinNum = SmallDim % X->Def.SiteToBit[isite];
260  SmallDim /= X->Def.SiteToBit[isite];
261 
262  X->Def.Total2Sz += X->Def.SiteToBit[isite] - 1 - 2*SpinNum;
263  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
264  }/*if (X->Def.iCalcModel == Spin)*/
265  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
266 
268  break; /*case SpinGC, Spin*/
269 
270  default:
271  fprintf(stdoutMPI, "Error ! Wrong model !\n");
272  return FALSE;
273  }/*switch (X->Def.iCalcModel)*/
274 
278  if (X->Boost.flgBoost == 1) {
279  isiteMax = X->Boost.W0;
280  ishift = 0;
281  for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++) {
282  isiteMax0 = X->Boost.list_6spin_star[ipivot][1]
283  + X->Boost.list_6spin_star[ipivot][2]
284  + X->Boost.list_6spin_star[ipivot][3]
285  + X->Boost.list_6spin_star[ipivot][4]
286  + X->Boost.list_6spin_star[ipivot][5];
287  if (ishift > 1) isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 1 - ishift;
288  else isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 2;
289  if (isiteMax0 < isiteMax) isiteMax = isiteMax0;
290  if (X->Boost.list_6spin_star[ipivot][6] == 1) ishift += X->Boost.ishift_nspin;
291  }/*for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++)*/
292 
293  NDimInterPE = 1;
294  for (isite = 0; isite < isiteMax; isite++) NDimInterPE *= 2;
295 
296  if (NDimInterPE < nproc) {
297  fprintf(stderr, "\n Error ! in ReadDefFileIdxPara.\n");
298  fprintf(stderr, "Too many MPI processes ! It should be <= %d. \n\n", NDimInterPE);
299  exitMPI(-1);
300  }/*if (NDimInterPE < nproc)*/
301  }/*if (X->Boost.flgBoost == 1)*/
302 
303  return TRUE;
304 }/*void CheckMPI*/
void exitMPI(int errorcode)
MPI abort wrapper.
Definition: wrapperMPI.c:86
char * cErrNProcNumberGneralSpin
Definition: ErrorMessage.c:93
#define ITINERANT
Definition: global.h:31
#define TRUE
Definition: global.h:26
char * cErrNProcNumberSet
Definition: ErrorMessage.c:95
char * cErrNProcNumberSpin
Definition: ErrorMessage.c:92
char * cErrNProcNumber
Definition: ErrorMessage.c:94
int nproc
Number of processors, defined in InitializeMPI()
Definition: global.h:161
#define FALSE
Definition: global.h:25
char * cErrNProcNumberHubbard
Error Message in CheckMPI.c.
Definition: ErrorMessage.c:91
struct EDMainCalStruct X
Definition: struct.h:431
int myrank
Process ID, defined in InitializeMPI()
Definition: global.h:162
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.h:164

◆ CheckMPI_Summary()

void CheckMPI_Summary ( struct BindStruct X)

Print information of MPI parallelization and modify DefineList::Tpow in the inter-process region.

Author
Mitsuaki Kawamura (The University of Tokyo)

Print the configuration in the inter process region of each PE as a binary (excepting general spin) format.

Reset DefineList::Tpow[DefNsite], DefineList::Tpow[DefNsite + 1] ... as inter process space For Hubbard & Kondo system, define DefineList::OrgTpow which is not affected by the number of processes.

Parameters
[in,out]X

Definition at line 310 of file CheckMPI.c.

References exitMPI(), FALSE, myrank, nproc, stdoutMPI, SumMPI_i(), SumMPI_li(), and X.

Referenced by check().

310  {
311 
312  int isite, iproc, SmallDim, SpinNum, Nelec;
313  unsigned long int idimMPI;
314 
315  fprintf(stdoutMPI, "\n\n###### MPI site separation summary ######\n\n");
316  fprintf(stdoutMPI, " INTRA process site\n");
317  fprintf(stdoutMPI, " Site Bit\n");
318  for (isite = 0; isite < X->Def.Nsite; isite++) {
319  switch (X->Def.iCalcModel) {
320  case HubbardGC:
321  case Hubbard:
322  case HubbardNConserved:
323  case Kondo:
324  case KondoGC:
325 
326  fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
327  break;
328 
329  case Spin:
330  case SpinGC:
331 
332  if (X->Def.iFlgGeneralSpin == FALSE) {
333  fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
334  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
335  else {
336  fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
337  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
338 
339  break;
340 
341  } /*switch (X->Def.iCalcModel)*/
342  } /*for (isite = 0; isite < X->Def.Nsite; isite++)*/
343 
344  fprintf(stdoutMPI, "\n INTER process site\n");
345  fprintf(stdoutMPI, " Site Bit\n");
346  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
347  switch (X->Def.iCalcModel) {
348  case HubbardGC:
349  case Hubbard:
350  case HubbardNConserved:
351  case Kondo:
352  case KondoGC:
353 
354  fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
355  break;
356 
357  case Spin:
358  case SpinGC:
359 
360  if (X->Def.iFlgGeneralSpin == FALSE) {
361  fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
362  }/*if (X->Def.iFlgGeneralSpin == FALSE) */
363  else {
364  fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
365  }/*if (X->Def.iFlgGeneralSpin == TRUE) */
366 
367  break;
368 
369  }/*switch (X->Def.iCalcModel)*/
370  }/*for (isite = X->Def.Nsite; isite < NsiteMPI; isite++)*/
371 
372  fprintf(stdoutMPI, "\n Process element info\n");
373  fprintf(stdoutMPI, " Process Dimension Nup Ndown Nelec Total2Sz State\n");
374 
375  for (iproc = 0; iproc < nproc; iproc++) {
376 
377  fprintf(stdoutMPI, " %7d", iproc);
378 
379  if (myrank == iproc) idimMPI = X->Check.idim_max;
380  else idimMPI = 0;
381  fprintf(stdoutMPI, " %15ld", SumMPI_li(idimMPI));
382 
383  if (myrank == iproc) Nelec = X->Def.Nup;
384  else Nelec = 0;
385  fprintf(stdoutMPI, " %4d", SumMPI_i(Nelec));
386 
387  if (myrank == iproc) Nelec = X->Def.Ndown;
388  else Nelec = 0;
389  fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));
390 
391  if (myrank == iproc){
392  Nelec = X->Def.Ne; //X->Def.Nup
393  if (X->Def.iCalcModel == Spin || X->Def.iCalcModel == SpinGC) Nelec += X->Def.Ndown;
394  }
395  else Nelec = 0;
396 
397  fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));
398 
399  if (myrank == iproc) Nelec = X->Def.Total2Sz;
400  else Nelec = 0;
401  fprintf(stdoutMPI, " %8d ", SumMPI_i(Nelec));
406  switch (X->Def.iCalcModel) {
407  case HubbardGC: /****************************************************/
408  case Hubbard:
409  case HubbardNConserved:
410  case Kondo:
411  case KondoGC:
412 
413  SmallDim = iproc;
414  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
415  SpinNum = SmallDim % 4;
416  SmallDim /= 4;
417  if (SpinNum == 0) fprintf(stdoutMPI, "00");
418  else if (SpinNum == 1) fprintf(stdoutMPI, "01");
419  else if (SpinNum == 2) fprintf(stdoutMPI, "10");
420  else if (SpinNum == 3) fprintf(stdoutMPI, "11");
421  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
422 
423  break;
424 
425  case Spin:
426  case SpinGC:
427 
428  SmallDim = iproc;
429  if (X->Def.iFlgGeneralSpin == FALSE) {
430  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
431  SpinNum = SmallDim % 2;
432  SmallDim /= 2;
433  fprintf(stdoutMPI, "%1d", SpinNum);
434  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
435  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
436  else {
437  SmallDim = iproc;
438  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
439  SpinNum = SmallDim % (int)X->Def.SiteToBit[isite];
440  SmallDim /= X->Def.SiteToBit[isite];
441  fprintf(stdoutMPI, "%1d", SpinNum);
442  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
443  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
444 
445  break;
446 
447  }/*switch (X->Def.iCalcModel)*/
448  fprintf(stdoutMPI, "\n");
449  }/*for (iproc = 0; iproc < nproc; iproc++)*/
450 
451  X->Check.idim_maxMPI = SumMPI_li(X->Check.idim_max);
452  fprintf(stdoutMPI, "\n Total dimension : %ld\n\n", X->Check.idim_maxMPI);
453  if (X->Check.idim_maxMPI < 1) {
454  fprintf(stdoutMPI, "ERROR! Total dimension < 1\n");
455  exitMPI(-1);
456  }
457 
464  switch (X->Def.iCalcModel) {
465  case HubbardGC: /****************************************************/
466  case Hubbard:
467  case HubbardNConserved:
468  case Kondo:
469  case KondoGC:
470 
471  X->Def.Tpow[2 * X->Def.Nsite] = 1;
472  for (isite = 2 * X->Def.Nsite + 1; isite < 2 * X->Def.NsiteMPI; isite++)
473  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;
474 
475  X->Def.OrgTpow[0]=1;
476  for (isite = 1; isite < 2 * X->Def.NsiteMPI; isite++)
477  X->Def.OrgTpow[isite] = X->Def.OrgTpow[isite-1]*2;
478 
479  break;
480 
481  case SpinGC:/********************************************************/
482  case Spin:
483 
484  if (X->Def.iFlgGeneralSpin == FALSE) {
485 
486  X->Def.Tpow[X->Def.Nsite] = 1;
487  for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
488  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;
489 
490  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
491  else{
492 
493  X->Def.Tpow[X->Def.Nsite] = 1;
494  for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
495  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * X->Def.SiteToBit[isite - 1];
496 
497  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
498  break;
499  } /*switch (X->Def.iCalcModel)*/
500 }/*void CheckMPI_Summary*/
void exitMPI(int errorcode)
MPI abort wrapper.
Definition: wrapperMPI.c:86
int nproc
Number of processors, defined in InitializeMPI()
Definition: global.h:161
int SumMPI_i(int idim)
MPI wrapper function to obtain sum of integer across processes.
Definition: wrapperMPI.c:256
#define FALSE
Definition: global.h:25
struct EDMainCalStruct X
Definition: struct.h:431
int myrank
Process ID, defined in InitializeMPI()
Definition: global.h:162
unsigned long int SumMPI_li(unsigned long int idim)
MPI wrapper function to obtain sum of unsigned long integer across processes.
Definition: wrapperMPI.c:239
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.h:164