
LevelDataI.H

00001 /* _______              __
00002   / ___/ /  ___  __ _  / /  ___
00003  / /__/ _ \/ _ \/  ' \/ _ \/ _ \
00004  \___/_//_/\___/_/_/_/_.__/\___/ 
00005 */
00006 //
00007 // This software is copyright (C) by the Lawrence Berkeley
00008 // National Laboratory.  Permission is granted to reproduce
00009 // this software for non-commercial purposes provided that
00010 // this notice is left intact.
00011 // 
00012 // It is acknowledged that the U.S. Government has rights to
00013 // this software under Contract DE-AC03-76SF00098 between
00014 // the U.S. Department of Energy and the University of
00015 // California.
00016 //
00017 // This software is provided as a professional and academic
00018 // contribution for joint exchange. Thus it is experimental,
00019 // is provided ``as is'', with no warranties of any kind
00020 // whatsoever, no support, no promise of updates, or printed
00021 // documentation. By using this software, you acknowledge
00022 // that the Lawrence Berkeley National Laboratory and
00023 // Regents of the University of California shall have no
00024 // liability with respect to the infringement of other
00025 // copyrights by any part of this software.
00026 //
00027 
00028 #include <cstdlib>
00029 #include <algorithm>
00030 #include "parstream.H"
00031 #include "memtrack.H"
00032 
00033 using std::sort;
00034 
00035 template<class T> inline
00036 LevelData<T>::LevelData()
00037   :m_sendbuffer(NULL), m_sendcapacity(0),
00038    m_recbuffer(NULL), m_reccapacity(0)
00039 {
00040 #ifdef MPI
00041 numSends = 0;
00042 #endif
00043 }
00044 
00045 template<class T> inline
00046 LevelData<T>::LevelData(const DisjointBoxLayout& dp, int comps, const IntVect& ghost,
00047                         const DataFactory<T>& a_factory)
00048   : m_disjointBoxLayout(dp), m_ghost(ghost), m_sendbuffer(NULL),
00049     m_sendcapacity(0),  m_recbuffer(NULL),m_reccapacity(0)
00050 { 
00051 #ifdef MPI
00052   numSends = 0;
00053 #endif
00054   m_boxLayout = dp; 
00055   m_comps = comps;
00056   m_isdefined = true;
00057 
00058   if(!dp.isClosed())
00059     {
00060       MayDay::Error("non-disjoint DisjointBoxLayout: LevelData<T>::LevelData(const DisjointBoxLayout& dp, int comps)");
00061     }
00062 
00063   Interval interval(0, comps-1);
00064   allocateGhostVector(a_factory, ghost);
00065   setVector(*this, interval, interval);
00066   
00067   // finally, create copier to be used for exchanges
00068   //if (m_ghost != IntVect::Zero) 
00069   //  m_exchangeCopier.define(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
00070 }
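// Illustrative usage sketch (not part of the original file; FArrayBox data and a
// DisjointBoxLayout named 'grids' are assumed, and 'grids' must already be closed):
//
//   DisjointBoxLayout grids;                             // boxes assigned, then close()d
//   LevelData<FArrayBox> phi(grids, 1, IntVect::Unit);   // 1 component, 1 ghost layer
//   for(DataIterator dit = phi.dataIterator(); dit.ok(); ++dit)
//     {
//       phi[dit()].setVal(0.0);                          // initialize the data on each box
//     }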
00071 
00072 
00073 
00074 // Since I need to prevent the user from invoking the
00075 // 'define' methods that take a general BoxLayout, I can no
00076 // longer call those functions myself. Ha!  So I have to recode
00077 // them here.
00078 
00079 template<class T> inline
00080 void LevelData<T>::define(const DisjointBoxLayout& dp, int comps, const IntVect& ghost,
00081                           const DataFactory<T> & a_factory)
00082 {
00083   m_isdefined = true;
00084   if(!dp.isClosed())
00085     {
00086       MayDay::Error("non-disjoint DisjointBoxLayout: LevelData<T>::define(const DisjointBoxLayout& dp,....)");
00087     }
00088   if(comps<=0)
00089     {
00090       MayDay::Error("LevelData::LevelData(const BoxLayout& dp, int comps)  comps<=0");
00091     }
00092   m_comps = comps;
00093   m_boxLayout = dp;
00094 
00095   m_disjointBoxLayout = dp;
00096   m_ghost = ghost;
00097 
00098   Interval interval(0, comps-1);
00099   allocateGhostVector(a_factory, ghost);
00100   setVector(*this, interval, interval);
00101 
00102   
00103   // finally, create copier to be used for exchanges
00104   //if (m_ghost != IntVect::Zero)
00105   //  m_exchangeCopier.define(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
00106 }
00107 
00108 
00109 template<class T> inline
00110 void LevelData<T>::define(const LevelData<T>& da,  const DataFactory<T> & a_factory)
00111 {
00112   m_isdefined = true;
00113   if(this == &da) return;
00114   m_disjointBoxLayout = da.m_disjointBoxLayout;
00115   m_boxLayout  = da.m_disjointBoxLayout;
00116   m_comps     = da.m_comps;
00117   m_ghost     = da.m_ghost;
00118 
00119   Interval srcAnddest(0, m_comps-1);
00120 
00121   allocateGhostVector(a_factory, m_ghost);
00122   setVector(da, srcAnddest, srcAnddest);
00123 
00124   
00125   // finally, create copier to be used for exchanges
00126   //if (m_ghost != IntVect::Zero)
00127   //  m_exchangeCopier.define(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
00128 }
00129 
00130 
00131 template<class T> inline
00132 void LevelData<T>::define(const LevelData<T>& da, const Interval& comps,
00133                           const DataFactory<T>& a_factory)
00134 {
00135   m_isdefined = true;
00136   if(this == &da){
00137     MayDay::Error(" LevelData<T>::define(const LevelData<T>& da, const Interval& comps) called with 'this'");
00138   }
00139   assert(comps.size()>0);
00140   // this line doesn't make any sense!
00141   //assert(comps.end()<=m_comps);
00142   assert(comps.begin()>=0);
00143 
00144   m_disjointBoxLayout = da.m_disjointBoxLayout;
00145   m_boxLayout  = da.m_disjointBoxLayout;
00146 
00147   m_comps = comps.size();
00148 
00149   m_ghost = da.m_ghost;
00150 
00151   Interval dest(0, m_comps-1);
00152 
00153   allocateGhostVector(a_factory, m_ghost);
00154 
00155   setVector(da, comps, dest);
00156 
00157   
00158   // finally, create copier to be used for exchanges
00159   //if (m_ghost != IntVect::Zero)
00160   //m_exchangeCopier.define(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
00161 
00162 }
00163 
00164 template<class T> inline
00165 void LevelData<T>::copyTo(const Interval& srcComps, 
00166                           BoxLayoutData<T>& dest,
00167                           const Interval& destComps) const
00168 {
00169   if((BoxLayoutData<T>*)this == &dest) return;
00170 
00171   if(boxLayout() == dest.boxLayout())
00172     {
00173       // parallel direct copy here, no communication issues
00174       for(DataIterator it(dataIterator()); it.ok(); ++it)
00175         {
00176           dest[it()].copy(box(it()), 
00177                           destComps,
00178                           box(it()),
00179                           this->operator[](it()),
00180                           srcComps);
00181         }
00182       return;
00183     }
00184 
00185   Copier copier(m_disjointBoxLayout, dest.boxLayout());
00186   copyTo(srcComps, dest, destComps, copier);
00187 }
00188 
00189 template<class T> inline
00190 void LevelData<T>::copyTo(const Interval& srcComps, 
00191                           LevelData<T>& dest,
00192                           const Interval& destComps) const
00193 {
00194   if(this == &dest){
00195     MayDay::Error("src == dest in copyTo function. Perhaps you want exchange ?");
00196   }
00197 
00198   if(boxLayout() == dest.boxLayout()  && dest.ghostVect() == IntVect::Zero)
00199     {
00200       // parallel direct copy here, no communication issues
00201       for(DataIterator it(dataIterator()); it.ok(); ++it)
00202         {
00203           dest[it()].copy(box(it()), 
00204                           destComps,
00205                           box(it()),
00206                           this->operator[](it()),
00207                           srcComps);
00208         }
00209       return;
00210     }
00211 
00212   Copier copier(m_disjointBoxLayout, dest.getBoxes(), dest.m_ghost);
00213   copyTo(srcComps, dest, destComps, copier);
00214 }
00215 
00216 
00217 template<class T> inline
00218 void LevelData<T>::copyTo(const Interval& srcComps, 
00219                           BoxLayoutData<T>& dest,
00220                           const Interval& destComps,
00221                           const Copier& copier) const
00222 {
00223 
00224   makeItSo(srcComps, *this, dest, destComps, copier);
00225 
00226 }
00227 
00228 template<class T> inline
00229 void LevelData<T>::copyTo(const Interval& srcComps, 
00230                           LevelData<T>& dest,
00231                           const Interval& destComps,
00232                           const Copier& copier) const
00233 {
00234 
00235   makeItSo(srcComps, *this, dest, destComps, copier);
00236 
00237 }
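// Illustrative usage sketch (hypothetical names): when the same source and destination
// layouts are used repeatedly, the Copier can be built once and passed to copyTo() so
// the intersection lists are not recomputed on every call:
//
//   Copier copier(src.getBoxes(), dest.getBoxes(), dest.ghostVect());
//   Interval comps(0, 0);                    // copy component 0 into component 0
//   src.copyTo(comps, dest, comps, copier);  // src, dest are LevelData<FArrayBox>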
00238 
00239 template<class T> inline 
00240 void LevelData<T>::exchange(const Interval& comps)
00241 {
00242   // later on we can code this part as a direct algorithm
00243   // by copying and pasting the code from the Copier::define code.
00244   // For now, just do the easy-to-debug approach.
00245   Copier copier(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
00246   exchange(comps, copier);
00247 
00248   // if there aren't any ghost cells, there isn't really anything
00249   // to do here (also, if m_ghost == Zero, m_exchangeCopier
00250   // wasn't defined!)
00251   //if (m_ghost != IntVect::Zero)
00252   //makeItSo(comps, *this, *this, comps, m_exchangeCopier);
00253 }
00254 
00255 template<class T> inline
00256 void LevelData<T>::exchange(const Interval& comps,
00257                             const Copier& copier)
00258 {
00259   makeItSo(comps, *this, *this, comps, copier);
00260 
00261 }
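// Illustrative usage sketch (hypothetical names): the Interval-only exchange() above
// constructs a fresh Copier on every call, so a caller that exchanges many times on
// the same grids can amortize that cost by caching one:
//
//   Copier exchangeCopier(grids, grids, IntVect::Unit);  // ghost vector matching phi
//   Interval comps(0, 0);                                // single-component data
//   for(int step = 0; step < nSteps; ++step)
//     {
//       // ... update the valid cells of phi ...
//       phi.exchange(comps, exchangeCopier);
//     }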
00262 
00263 template<class T> inline
00264 void LevelData<T>::makeItSo(const Interval&   a_srcComps, 
00265                             const LevelData<T>& a_src,
00266                             BoxLayoutData<T>& a_dest,
00267                             const Interval&   a_destComps,
00268                             const Copier&     a_copier) const
00269 {
00270   // The following five functions are nullOps in uniprocessor mode
00271 
00272   completePendingSends(); // wait for sends from possible previous operation
00273 
00274   allocateBuffers(a_src,  a_srcComps,
00275                   a_dest, a_destComps,
00276                   a_copier);  //monkey with buffers, set up 'fromMe' and 'toMe' queues
00277 
00278   writeSendDataFromMeIntoBuffers(a_src, a_srcComps);
00279 
00280   postReceivesToMe(); // all non-blocking
00281 
00282   postSendsFromMe();  // all non-blocking
00283 
00284   //  computation that could occur during communication should really
00285   //  go here somehow, while all the non-blocking sending and receiving is
00286   //  going on.  
00287   //
00288   //  my thought is to return from this function at this point an object
00289   //  that encapsulates the argument list above.  
00290   //  a "ChomboMessaging" object.
00291   //  The user can keep a reference
00292   //  to this object and do computations.  When they reach the limit of what
00293   //  they can compute without this communication completing, they call the
00294   //  "finalize()" function of their ChomboMessaging object and the rest of this
00295   //  code below gets executed.
00296   //  a real question though is: is there really enough computation to do while
00297   //  messaging is going on to justify the effort, and what machines really have
00298 //  good asynchronous messaging to make the work worthwhile?
00299   //  
00300   //  the other approach is to more finely decompose the overlapping of
00301   //  messaging and computation by using the ChomboMessaging object in the
00302   //  DataIterator construction.  The DataIterator returns T objects as they
00303   //  are completed from messaging.  This preserves almost all of the Chombo
00304   //  code as is but would be mucho tricky to actually implement and might only
00305 //  gain little.  This would not be a thing to try until Chombo is
00306 //  heavily instrumented for performance measuring.  In this design, unpackReceivesToMe()
00307   //  would become a complicated process interwoven with a DataIterator.
00308 
00309   //  postReceivesToMe();
00310 
00311   // perform local copy
00312   for(CopyIterator it(a_copier, CopyIterator::LOCAL); it.ok(); ++it)
00313     {
00314       const MotionItem& item = it();
00315       a_dest[item.toIndex].copy(item.fromRegion, 
00316                                 a_destComps,
00317                                 item.toRegion,
00318                                 a_src[item.fromIndex],
00319                                 a_srcComps);
00320     }
00321 
00322   unpackReceivesToMe(a_dest, a_destComps); // nullOp in uniprocessor mode
00323 
00324 }
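// A minimal sketch of the split-phase "ChomboMessaging" idea described in the comment
// above (purely hypothetical; no such class exists here).  The argument list of
// makeItSo() would be captured in an object, the non-blocking calls posted up front,
// and the local copy / wait / unpack deferred until the caller runs out of work:
//
//   ChomboMessaging msg = levelData.beginCopy(srcComps, dest, destComps, copier);
//   // ... computation that does not need the incoming ghost data ...
//   msg.finalize();   // local copy, MPI_Waitall, unpackReceivesToMe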
00325 
00326 template<class T> inline
00327 void LevelData<T>::define(const BoxLayout& dp, int comps,  const DataFactory<T>& a_factory)
00328 {
00329   MayDay::Error("LevelData<T>::define called with BoxLayout input");
00330 }
00331 
00332 template<class T> inline
00333 void LevelData<T>::define(const BoxLayout& dp)
00334 {
00335   MayDay::Error("LevelData<T>::define called with BoxLayout input");
00336 }
00337 
00338 template<class T> inline
00339 void LevelData<T>::define(const BoxLayoutData<T>& da, const DataFactory<T>& a_factory )
00340 {
00341   MayDay::Error("LevelData<T>::define called with BoxLayout input");
00342 }
00343 
00344 template<class T> inline
00345 void LevelData<T>::define(const BoxLayoutData<T>& da, const Interval& comps,
00346                           const DataFactory<T>& a_factory)
00347 {
00348   MayDay::Error("LevelData<T>::define called with BoxLayout input");
00349 }
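// Illustrative consequence of the stubs above (hypothetical call): handing LevelData a
// plain BoxLayout instead of a DisjointBoxLayout is rejected at run time rather than
// silently accepted:
//
//   BoxLayout plainLayout;          // not disjoint
//   LevelData<FArrayBox> ld;
//   ld.define(plainLayout);         // calls MayDay::Error(...) and aborts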
00350 
00351 template<class T> inline
00352 LevelData<T>::~LevelData()
00353 {
00354   completePendingSends();
00355   free(m_sendbuffer);
00356   free(m_recbuffer);
00357 }
00358 
00359 #ifndef MPI
00360 // uniprocessor version of all these nullop functions.
00361 template<class T> inline
00362 void LevelData<T>::completePendingSends() const
00363 {;}
00364 
00365 template<class T> inline
00366 void LevelData<T>::allocateBuffers(const LevelData<T>& a_src, 
00367                                    const Interval& a_srcComps,
00368                                    const BoxLayoutData<T>& a_dest,
00369                                    const Interval& a_destComps,
00370                                    const Copier&   a_copier) const
00371 {;}  
00372 
00373 template<class T> inline
00374 void LevelData<T>::writeSendDataFromMeIntoBuffers(const LevelData<T>& a_src, 
00375                                                   const Interval&     a_srcComps) const
00376 {;}
00377 
00378 template<class T> inline
00379 void LevelData<T>::postSendsFromMe() const
00380 {;}
00381 
00382 template<class T> inline
00383 void LevelData<T>::postReceivesToMe() const
00384 {;}
00385 
00386 template<class T> inline
00387 void LevelData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest, 
00388                                       const Interval&   a_destComps) const
00389 {;}
00390 
00391 #else
00392 
00393 // MPI versions of the above functions.
00394 
00395 template<class T> inline
00396 void LevelData<T>::completePendingSends() const
00397 {
00398   if(numSends > 0){
00399     int result = MPI_Waitall(numSends, m_sendRequests, m_sendStatus);
00400     if(result != MPI_SUCCESS)
00401       {
00402         //hell if I know what to do about failed messaging here
00403       }
00404 
00405     delete[] m_sendRequests;
00406     delete[] m_sendStatus;
00407   }
00408   numSends = 0;
00409 }
00410 
00411 template<class T> inline
00412 void LevelData<T>::allocateBuffers(const LevelData<T>& a_src, 
00413                                    const Interval& a_srcComps,
00414                                    const BoxLayoutData<T>& a_dest,
00415                                    const Interval& a_destComps,
00416                                    const Copier&   a_copier) const
00417 {
00418   m_fromMe.resize(0);
00419   m_toMe.resize(0);
00420   size_t sendBufferSize = 0;
00421   size_t recBufferSize  = 0;
00422   // two versions of code here.  one for preAllocatable T, one not.
00423 
00424   //  T dummy;
00425   for(CopyIterator it(a_copier, CopyIterator::FROM); it.ok(); ++it)
00426     {
00427       const MotionItem& item = it();
00428       bufEntry b;
00429       b.item = &item;
00430       b.size = a_src[item.fromIndex].size(item.fromRegion, a_srcComps);
00431       sendBufferSize+=b.size;
00432       b.procID = item.procID;
00433       m_fromMe.push_back(b);
00434     }
00435   sort(m_fromMe.begin(), m_fromMe.end());
00436   for(CopyIterator it(a_copier, CopyIterator::TO); it.ok(); ++it)
00437     {
00438       const MotionItem& item = it();
00439       bufEntry b;
00440       b.item = &item;
00441       if (!(T::preAllocatable() == 2))
00442         {
00443           b.size = a_dest[item.toIndex].size(item.toRegion, a_destComps);
00444           recBufferSize+=b.size;
00445         }
00446       b.procID = item.procID;
00447       m_toMe.push_back(b);
00448     }
00449   sort(m_toMe.begin(), m_toMe.end());
00450 
00451   if(T::preAllocatable() == 2) // dynamic allocatable, need two pass
00452     {
00453       // in the non-preallocatable case, I need to message the
00454       // values for the m_toMe[*].size 
00455       if(m_fromMe.size() > 0)
00456         {
00457           MPI_Request nullrequest;
00458 
00459           int lastProc = -1;
00460           int messageIndex = 0;
00461           for(int i=0; i<m_fromMe.size(); ++i)
00462             {
00463               bufEntry& b = m_fromMe[i];
00464               if(b.procID == lastProc) messageIndex++;
00465               else                     messageIndex = 0;
00466               lastProc = b.procID;
00467               MPI_Isend(&(b.size), 1, MPI_INT, b.procID, 
00468                         messageIndex, Chombo_MPI::comm, &(nullrequest));
00469               MPI_Request_free(&(nullrequest));  // we don't wait on these sends, assume
00470               //    this memory really doesn't go anywhere.  Most MPI systems will fast-path
00471               //    a single-integer message.
00472             }
00473         }
00474       if(m_toMe.size() > 0)
00475         {
00476           m_receiveRequests = new MPI_Request[m_toMe.size()];
00477           m_receiveStatus   = new MPI_Status[m_toMe.size()];
00478           int lastProc = -1;
00479           int messageIndex = 0;
00480           for(int i=0; i<m_toMe.size(); ++i)
00481             {
00482               bufEntry& b = m_toMe[i];
00483               if(b.procID == lastProc) messageIndex++;
00484               else                     messageIndex = 0;
00485               lastProc = b.procID;
00486               MPI_Irecv(&(b.size), 1, MPI_INT, b.procID, 
00487                         messageIndex, Chombo_MPI::comm, m_receiveRequests+i);
00488             }
00489 
00490           int result = MPI_Waitall(m_toMe.size(), m_receiveRequests, m_receiveStatus);
00491           if(result != MPI_SUCCESS)
00492             {
00493               MayDay::Error("First pass of two-phase communication failed");
00494             }
00495           for(int i=0; i<m_toMe.size(); ++i)  recBufferSize+= m_toMe[i].size;
00496           delete[] m_receiveRequests;
00497           delete[] m_receiveStatus;
00498         }
00499     }
00500 
00501   // allocate send and receive buffer space.
00502 
00503   if(sendBufferSize > m_sendcapacity)
00504     {
00505       free(m_sendbuffer);
00506       m_sendbuffer = malloc(sendBufferSize);
00507       if(m_sendbuffer == NULL)
00508         {
00509           MayDay::Error("Out of memory in LevelData::allocatebuffers");
00510         }
00511       m_sendcapacity = sendBufferSize;
00512     }
00513 
00514   if(recBufferSize > m_reccapacity)
00515     {
00516       free(m_recbuffer);
00517       m_recbuffer = malloc(recBufferSize);
00518       if(m_recbuffer == NULL)
00519         {
00520           MayDay::Error("Out of memory in LevelData::allocatebuffers");
00521         }
00522       m_reccapacity = recBufferSize;
00523     }
00524 
00525 
00526   /*
00527   pout()<<"\n";
00528   for(int i=0; i<m_fromMe.size(); i++) 
00529     pout()<<m_fromMe[i].item->region<<"{"<<m_fromMe[i].procID<<"}"<<" ";
00530   pout() <<"::::";
00531   for(int i=0; i<m_toMe.size(); i++) 
00532     pout()<<m_toMe[i].item->region<<"{"<<m_toMe[i].procID<<"}"<<" ";
00533   pout() << endl;
00534   */
00535 
00536   char* nextFree = (char*)m_sendbuffer;
00537   if(m_fromMe.size() > 0)
00538   {
00539     for(unsigned int i=0; i<m_fromMe.size(); ++i)
00540       {
00541         m_fromMe[i].bufPtr = nextFree;
00542         nextFree += m_fromMe[i].size;
00543       }
00544   }
00545 
00546   nextFree = (char*)m_recbuffer;
00547   if(m_toMe.size() > 0)
00548   {
00549     for(unsigned int i=0; i<m_toMe.size(); ++i)
00550       {
00551         m_toMe[i].bufPtr = nextFree;
00552         nextFree += m_toMe[i].size;
00553       }
00554   }
00555 
00556   // since fromMe and toMe are sorted based on procID, messages can now be grouped
00557   // together on a per-processor basis.
00558 
00559 }
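// The buffer bookkeeping above leans on a linearization contract for T.  A sketch of
// the member functions this file calls on T, with signatures inferred from the call
// sites here rather than quoted from a header:
//
//   int  size(const Box& region, const Interval& comps) const;                 // bytes needed
//   void linearOut(void* buf, const Box& region, const Interval& comps) const; // pack
//   void linearIn (void* buf, const Box& region, const Interval& comps);       // unpack
//   static int preAllocatable();  // == 2 means sizes are known only at run time,
//                                 //    which triggers the two-pass size exchange above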
00560 
00561 
00562 
00563 template<class T> inline
00564 void LevelData<T>::writeSendDataFromMeIntoBuffers(const LevelData<T>& a_src, 
00565                                                   const Interval&     a_srcComps) const
00566 {
00567 
00568 
00569   for(unsigned int i=0; i<m_fromMe.size(); ++i)
00570     {
00571       const bufEntry& entry = m_fromMe[i];
00572       a_src[entry.item->fromIndex].linearOut(entry.bufPtr, entry.item->fromRegion, a_srcComps);
00573     }
00574 
00575 }
00576 
00577 template<class T> inline
00578 void LevelData<T>::postSendsFromMe() const
00579 {
00580 
00581   // now we get the magic of message coalescence
00582   // fromMe has already been sorted in the allocateBuffers() step.
00583 
00584   numSends = m_fromMe.size();
00585   if(numSends > 1){
00586   for(unsigned int i=m_fromMe.size()-1; i>0; --i)
00587     {
00588       if(m_fromMe[i].procID == m_fromMe[i-1].procID) 
00589         {
00590           numSends--;
00591           m_fromMe[i-1].size+=m_fromMe[i].size;
00592           m_fromMe[i].size = 0;
00593         }
00594     }
00595   }
00596   m_sendRequests = new MPI_Request[numSends];
00597   m_sendStatus = new MPI_Status[numSends];
00598 
00599 
00600   unsigned int next=0;
00601   for(int i=0; i<numSends; ++i)
00602     {
00603       const bufEntry& entry = m_fromMe[next];
00604 //       cout<<procID()<< ": sending message of "<<entry.size;
00605 //       cout<<" to proc "<<  entry.procID<<endl;
00606       MPI_Isend(entry.bufPtr, entry.size, MPI_BYTE, entry.procID, 
00607                 0, Chombo_MPI::comm, m_sendRequests+i);
00608       ++next;
00609       while(next < m_fromMe.size() && m_fromMe[next].size == 0) ++next;
00610     }
00611 
00612 
00613 }
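// Worked example of the coalescing above (hypothetical sizes): suppose m_fromMe, already
// sorted by procID, holds entries of size {40, 24, 16} bound for proc 3 followed by {8}
// for proc 7.  The backward loop folds each same-destination run into its first entry:
//
//   sizes before: {40, 24, 16, 8}   after: {80, 0, 0, 8}   numSends: 4 -> 2
//
// Because the buffer pointers were laid out contiguously in the same sorted order, the
// first entry of each run now describes one contiguous message, and the zero-size
// entries are skipped by the 'next' scan in the send loop.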
00614 
00615 template<class T> inline
00616 void LevelData<T>::postReceivesToMe() const
00617 {
00618   numReceives = m_toMe.size();
00619 
00620   if(numReceives > 1){
00621   for(unsigned int i=m_toMe.size()-1; i>0; --i)
00622     {
00623       if(m_toMe[i].procID == m_toMe[i-1].procID) 
00624         {
00625           numReceives--;
00626           m_toMe[i-1].size+=m_toMe[i].size;
00627           m_toMe[i].size = 0;
00628         }
00629     }
00630   }
00631   m_receiveRequests = new MPI_Request[numReceives];
00632   m_receiveStatus = new MPI_Status[numReceives];
00633 
00634 
00635   unsigned int next=0;
00636   for(int i=0; i<numReceives; ++i)
00637     {
00638       const bufEntry& entry = m_toMe[next];
00639  //      cout<<procID()<< ": receiving message of "<<entry.size;
00640 //       cout<<" from proc "<<  entry.procID<<endl;
00641       MPI_Irecv(entry.bufPtr, entry.size, MPI_BYTE, entry.procID, 
00642                 0, Chombo_MPI::comm, m_receiveRequests+i);
00643       ++next;
00644       while(next < m_toMe.size() && m_toMe[next].size == 0) ++next;
00645     }
00646 
00647 }
00648 
00649 
00650 template<class T> inline
00651 void LevelData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest, 
00652                                       const Interval&   a_destComps) const
00653 {
00654   // wish asynchronous communication worked on all machines so that
00655   // I didn't have to do this part:
00656   completePendingSends();
00657 
00658 
00659   if(numReceives > 0){
00660     int result = MPI_Waitall(numReceives, m_receiveRequests, m_receiveStatus);
00661     if(result != MPI_SUCCESS)
00662       {
00663         //hell if I know what to do about failed messaging here
00664       }
00665 
00666     for(unsigned int i=0; i<m_toMe.size(); ++i)
00667       {
00668         const bufEntry& entry = m_toMe[i];
00669         a_dest[entry.item->toIndex].linearIn(entry.bufPtr, entry.item->toRegion, a_destComps);
00670       }
00671 
00672     delete[] m_receiveRequests;
00673     delete[] m_receiveStatus;
00674   }
00675   numReceives = 0;
00676 }
00677 
00678 #endif
00679 
00680 
00681 
00682 
00683 
00684 
00685 
