
LevelDataI.H

/* _______              __
  / ___/ /  ___  __ _  / /  ___
 / /__/ _ \/ _ \/  ' \/ _ \/ _ \
 \___/_//_/\___/_/_/_/_.__/\___/
*/
//
// This software is copyright (C) by the Lawrence Berkeley
// National Laboratory.  Permission is granted to reproduce
// this software for non-commercial purposes provided that
// this notice is left intact.
//
// It is acknowledged that the U.S. Government has rights to
// this software under Contract DE-AC03-76SF00098 between
// the U.S. Department of Energy and the University of
// California.
//
// This software is provided as a professional and academic
// contribution for joint exchange. Thus it is experimental,
// is provided ``as is'', with no warranties of any kind
// whatsoever, no support, no promise of updates, or printed
// documentation. By using this software, you acknowledge
// that the Lawrence Berkeley National Laboratory and
// Regents of the University of California shall have no
// liability with respect to the infringement of other
// copyrights by any part of this software.
//

#include <cstdlib>
#include <algorithm>
#include "parstream.H"
#include "memtrack.H"

using std::sort;
template<class T> inline
LevelData<T>::LevelData()
  :m_sendbuffer(NULL), m_sendcapacity(0),
   m_recbuffer(NULL), m_reccapacity(0)
{
#ifdef MPI
  numSends = 0;
  numReceives = 0;
#endif
}

template<class T> inline
LevelData<T>::LevelData(const DisjointBoxLayout& dp, int comps, const IntVect& ghost,
                        const DataFactory<T>& a_factory)
  : m_disjointBoxLayout(dp), m_ghost(ghost), m_sendbuffer(NULL),
    m_sendcapacity(0), m_recbuffer(NULL), m_reccapacity(0)
{
#ifdef MPI
  numSends = 0;
  numReceives = 0;
#endif
  m_boxLayout = dp;
  m_comps = comps;
  m_isdefined = true;

  if(!dp.isClosed())
    {
      MayDay::Error("non-disjoint DisjointBoxLayout: LevelData<T>::LevelData(const DisjointBoxLayout& dp, int comps)");
    }

  Interval interval(0, comps-1);
  allocateGhostVector(a_factory, ghost);
  setVector(*this, interval, interval);
}
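
// A minimal usage sketch of this constructor (names are illustrative only;
// it assumes FArrayBox-valued data and a DisjointBoxLayout 'dbl' that has
// already had its boxes added and close() called):
//
//   DisjointBoxLayout dbl;                             // boxes added elsewhere, then dbl.close()
//   LevelData<FArrayBox> phi(dbl, 1, IntVect::Unit);   // 1 component, 1 ghost cell
//   for(DataIterator dit = phi.dataIterator(); dit.ok(); ++dit)
//     {
//       phi[dit()].setVal(0.0);                        // touch each box this proc owns
//     }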


// Because the user must be prevented from invoking the 'define' methods
// that take a general BoxLayout, those methods are no longer accessible
// from here either, so they are re-implemented below.

template<class T> inline
void LevelData<T>::define(const DisjointBoxLayout& dp, int comps, const IntVect& ghost,
                          const DataFactory<T> & a_factory)
{
  m_isdefined = true;
  if(!dp.isClosed())
    {
      MayDay::Error("non-disjoint DisjointBoxLayout: LevelData<T>::define(const DisjointBoxLayout& dp,....)");
    }
  if(comps<=0)
    {
      MayDay::Error("LevelData<T>::define(const DisjointBoxLayout& dp, int comps,...)  comps<=0");
    }
  m_comps = comps;
  m_boxLayout = dp;

  m_disjointBoxLayout = dp;
  m_ghost = ghost;

  Interval interval(0, comps-1);
  allocateGhostVector(a_factory, ghost);
  setVector(*this, interval, interval);

#ifdef MPI
  m_fromMe.resize(0);
  m_toMe.resize(0);
#endif
}


template<class T> inline
void LevelData<T>::define(const LevelData<T>& da,  const DataFactory<T> & a_factory)
{
  m_isdefined = true;
  if(this == &da) return;
  m_disjointBoxLayout = da.m_disjointBoxLayout;
  m_boxLayout  = da.m_disjointBoxLayout;
  m_comps     = da.m_comps;
  m_ghost     = da.m_ghost;

  Interval srcAnddest(0, m_comps-1);

  allocateGhostVector(a_factory, m_ghost);
  setVector(da, srcAnddest, srcAnddest);

#ifdef MPI
  m_fromMe.resize(0);
  m_toMe.resize(0);
#endif
}


template<class T> inline
void LevelData<T>::define(const LevelData<T>& da, const Interval& comps,
                          const DataFactory<T>& a_factory)
{
  m_isdefined = true;
  if(this == &da){
    MayDay::Error(" LevelData<T>::define(const LevelData<T>& da, const Interval& comps) called with 'this'");
  }
  assert(comps.size()>0);
  // this assertion doesn't make sense as written: m_comps belongs to *this
  // and is only set below; the meaningful bound would be da.m_comps.
  //assert(comps.end()<=m_comps);
  assert(comps.begin()>=0);

  m_disjointBoxLayout = da.m_disjointBoxLayout;
  m_boxLayout  = da.m_disjointBoxLayout;

  m_comps = comps.size();

  m_ghost = da.m_ghost;

  Interval dest(0, m_comps-1);

  allocateGhostVector(a_factory, m_ghost);

  setVector(da, comps, dest);

#ifdef MPI
  m_fromMe.resize(0);
  m_toMe.resize(0);
#endif
}
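
// A minimal sketch of defining one LevelData from a component subrange of
// another (illustrative names; assumes FArrayBox data, that 'velocity' holds
// SpaceDim components, and that the DataFactory argument has its usual
// default):
//
//   LevelData<FArrayBox> velocity(dbl, SpaceDim, IntVect::Unit);
//   LevelData<FArrayBox> normalComp;
//   normalComp.define(velocity, Interval(0, 0));   // copy component 0 only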

template<class T> inline
void LevelData<T>::copyTo(const Interval& srcComps,
                          BoxLayoutData<T>& dest,
                          const Interval& destComps) const
{
  if((BoxLayoutData<T>*)this == &dest) return;

  if(boxLayout() == dest.boxLayout())
    {
      // parallel direct copy here, no communication issues
      for(DataIterator it(dataIterator()); it.ok(); ++it)
        {
          dest[it()].copy(box(it()),
                          destComps,
                          box(it()),
                          this->operator[](it()),
                          srcComps);
        }
      return;
    }

  Copier copier(m_disjointBoxLayout, dest.boxLayout());
  copyTo(srcComps, dest, destComps, copier);
}

template<class T> inline
void LevelData<T>::copyTo(const Interval& srcComps,
                          LevelData<T>& dest,
                          const Interval& destComps) const
{
  if(this == &dest){
    MayDay::Error("src == dest in copyTo function. Perhaps you want exchange()?");
  }

  if(boxLayout() == dest.boxLayout() && dest.ghostVect() == IntVect::Zero)
    {
      // parallel direct copy here, no communication issues
      for(DataIterator it(dataIterator()); it.ok(); ++it)
        {
          dest[it()].copy(box(it()),
                          destComps,
                          box(it()),
                          this->operator[](it()),
                          srcComps);
        }
      return;
    }

  Copier copier(m_disjointBoxLayout, dest.getBoxes(), dest.m_ghost);
  copyTo(srcComps, dest, destComps, copier);
}
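
// A minimal sketch of copying between LevelData objects that live on
// different DisjointBoxLayouts (illustrative names; assumes FArrayBox data,
// the default ghost/factory arguments from LevelData.H, and that interval()
// returns the full component range):
//
//   LevelData<FArrayBox> coarseData(coarseGrids, 1);
//   LevelData<FArrayBox> fineData(fineGrids, 1, IntVect::Unit);
//   coarseData.copyTo(coarseData.interval(), fineData, fineData.interval());
//
// Only regions where the layouts intersect are copied; because the Copier
// built above includes dest.m_ghost, ghost cells of 'fineData' lying under
// valid cells of 'coarseData' are filled as well.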


template<class T> inline
void LevelData<T>::copyTo(const Interval& srcComps,
                          BoxLayoutData<T>& dest,
                          const Interval& destComps,
                          const Copier& copier) const
{
  makeItSo(srcComps, *this, dest, destComps, copier);
}

template<class T> inline
void LevelData<T>::copyTo(const Interval& srcComps,
                          LevelData<T>& dest,
                          const Interval& destComps,
                          const Copier& copier) const
{
  makeItSo(srcComps, *this, dest, destComps, copier);
}
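
// When the same source and destination layouts are used repeatedly (e.g.
// every time step), the Copier can be built once and passed in so the
// communication pattern is not recomputed on each call.  A sketch with
// illustrative names (FArrayBox data assumed):
//
//   Copier patternCopier(srcGrids, destGrids, IntVect::Zero);   // build once
//   for(int step = 0; step < nSteps; ++step)
//     {
//       src.copyTo(src.interval(), dest, dest.interval(), patternCopier);
//     }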

template<class T> inline
void LevelData<T>::exchange(const Interval& comps)
{
  // Later on this could be coded as a direct algorithm by lifting the logic
  // from Copier::define; for now, use the easier-to-debug approach of
  // building a Copier and calling the two-argument exchange.
  Copier copier(m_disjointBoxLayout, m_disjointBoxLayout, m_ghost);
  exchange(comps, copier);

  // If there are no ghost cells there is nothing to do here (also, if
  // m_ghost == Zero, m_exchangeCopier was never defined).
  //if (m_ghost != IntVect::Zero)
  //makeItSo(comps, *this, *this, comps, m_exchangeCopier);
}

template<class T> inline
void LevelData<T>::exchange(const Interval& comps,
                            const Copier& copier)
{
  makeItSo(comps, *this, *this, comps, copier);
}
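
// Typical ghost-cell update after the valid regions of 'phi' have been
// modified (a sketch; assumes 'phi' was defined over 'dbl' with one ghost
// cell, as in the earlier example):
//
//   Copier exchangeCopier(dbl, dbl, IntVect::Unit);    // build once
//   ...
//   phi.exchange(phi.interval(), exchangeCopier);      // each time step
//
// The one-argument phi.exchange(phi.interval()) does the same thing but
// rebuilds the Copier on every call.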

template<class T> inline
void LevelData<T>::makeItSo(const Interval&   a_srcComps,
                            const LevelData<T>& a_src,
                            BoxLayoutData<T>& a_dest,
                            const Interval&   a_destComps,
                            const Copier&     a_copier) const
{
  // The following five functions are nullOps in uniprocessor mode

  // Instead of doing this here, do it at the end of makeItSo (ndk)
  //completePendingSends(); // wait for sends from possible previous operation

  // Logic to determine under what conditions the messaging pattern and
  // buffers from the previous call to makeItSo can be reused rather than
  // rebuilt.  Pretty elaborate, I know.  (bvs)
#ifdef MPI
  static Copier* lastCopier = NULL;

  if(T::preAllocatable() == 2 || !a_copier.bufferAllocated() ||
     (m_fromMe.size() + m_toMe.size() == 0) || lastCopier != &a_copier)
    {
      // set up buffers and the 'fromMe' and 'toMe' queues
      allocateBuffers(a_src,  a_srcComps,
                      a_dest, a_destComps,
                      a_copier);
      a_copier.setBufferAllocated(true);
    }
  lastCopier = (Copier*)(&a_copier);
#endif

  writeSendDataFromMeIntoBuffers(a_src, a_srcComps);

  // If there is nothing to receive/send, don't go into these functions
  // and allocate memory that will not be freed later.  (ndk)
  // The #ifdef MPI is for the m_toMe and m_fromMe members.
#ifdef MPI
  if (m_toMe.size() > 0) {
    postReceivesToMe(); // all non-blocking
  }

  if (m_fromMe.size() > 0) {
    postSendsFromMe();  // all non-blocking
  }
#endif

  //  Computation that could overlap with communication should really go
  //  here, while all the non-blocking sends and receives are in flight.
  //
  //  One idea is to return from this function at this point with an object
  //  that encapsulates the argument list above -- a "ChomboMessaging"
  //  object.  The user keeps a reference to this object and does
  //  computations.  When they reach the limit of what they can compute
  //  without this communication completing, they call the "finalize()"
  //  function of their ChomboMessaging object and the rest of the code
  //  below gets executed.  A real question, though, is whether there is
  //  enough computation to do while messaging is going on to justify the
  //  effort, and which machines have good enough asynchronous messaging to
  //  make the work worthwhile.
  //
  //  The other approach is to decompose the overlap of messaging and
  //  computation more finely by using the ChomboMessaging object in the
  //  DataIterator construction.  The DataIterator returns T objects as they
  //  are completed by messaging.  This preserves almost all of the Chombo
  //  code as is, but would be very tricky to implement and might gain
  //  little.  It should not be attempted until Chombo is heavily
  //  instrumented for performance measurement.  In this design,
  //  unpackReceivesToMe() would become a complicated process interwoven
  //  with a DataIterator.

  //  postReceivesToMe();

  // perform local copy
  for(CopyIterator it(a_copier, CopyIterator::LOCAL); it.ok(); ++it)
    {
      const MotionItem& item = it();
      a_dest[item.toIndex].copy(item.fromRegion,
                                a_destComps,
                                item.toRegion,
                                a_src[item.fromIndex],
                                a_srcComps);
    }

  // Moved here from unpackReceivesToMe()  (ndk)
  completePendingSends(); // wait for the sends posted above to complete

  unpackReceivesToMe(a_dest, a_destComps); // nullOp in uniprocessor mode
}

template<class T> inline
void LevelData<T>::define(const BoxLayout& dp, int comps,  const DataFactory<T>& a_factory)
{
  MayDay::Error("LevelData<T>::define called with BoxLayout input");
}

template<class T> inline
void LevelData<T>::define(const BoxLayout& dp)
{
  MayDay::Error("LevelData<T>::define called with BoxLayout input");
}

template<class T> inline
void LevelData<T>::define(const BoxLayoutData<T>& da, const DataFactory<T>& a_factory)
{
  MayDay::Error("LevelData<T>::define called with BoxLayoutData input");
}

template<class T> inline
void LevelData<T>::define(const BoxLayoutData<T>& da, const Interval& comps,
                          const DataFactory<T>& a_factory)
{
  MayDay::Error("LevelData<T>::define called with BoxLayoutData input");
}

template<class T> inline
LevelData<T>::~LevelData()
{
  completePendingSends();
  free(m_sendbuffer);
  free(m_recbuffer);
}

#ifndef MPI
// uniprocessor versions of all these functions: they are nullOps.
template<class T> inline
void LevelData<T>::completePendingSends() const
{;}

template<class T> inline
void LevelData<T>::allocateBuffers(const LevelData<T>& a_src,
                                   const Interval& a_srcComps,
                                   const BoxLayoutData<T>& a_dest,
                                   const Interval& a_destComps,
                                   const Copier&   a_copier) const
{;}

template<class T> inline
void LevelData<T>::writeSendDataFromMeIntoBuffers(const LevelData<T>& a_src,
                                                  const Interval&     a_srcComps) const
{;}

template<class T> inline
void LevelData<T>::postSendsFromMe() const
{;}

template<class T> inline
void LevelData<T>::postReceivesToMe() const
{;}

template<class T> inline
void LevelData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest,
                                      const Interval&   a_destComps) const
{;}

#else

// MPI versions of the above functions.

template<class T> inline
void LevelData<T>::completePendingSends() const
{
  if(numSends > 0){
    int result = MPI_Waitall(numSends, m_sendRequests, m_sendStatus);
    if(result != MPI_SUCCESS)
      {
        // it is not clear what to do about failed messaging here
      }

    delete[] m_sendRequests;
    delete[] m_sendStatus;
  }
  numSends = 0;
}

template<class T> inline
void LevelData<T>::allocateBuffers(const LevelData<T>& a_src,
                                   const Interval& a_srcComps,
                                   const BoxLayoutData<T>& a_dest,
                                   const Interval& a_destComps,
                                   const Copier&   a_copier) const
{
  m_fromMe.resize(0);
  m_toMe.resize(0);
  size_t sendBufferSize = 0;
  size_t recBufferSize  = 0;
  // Two versions of code here: one for preAllocatable T, one not.

  //  T dummy;
  for(CopyIterator it(a_copier, CopyIterator::FROM); it.ok(); ++it)
    {
      const MotionItem& item = it();
      bufEntry b;
      b.item = &item;
      b.size = a_src[item.fromIndex].size(item.fromRegion, a_srcComps);
      sendBufferSize+=b.size;
      b.procID = item.procID;
      m_fromMe.push_back(b);
    }
  sort(m_fromMe.begin(), m_fromMe.end());
  for(CopyIterator it(a_copier, CopyIterator::TO); it.ok(); ++it)
    {
      const MotionItem& item = it();
      bufEntry b;
      b.item = &item;
      if (!(T::preAllocatable() == 2))
        {
          b.size = a_dest[item.toIndex].size(item.toRegion, a_destComps);
          recBufferSize+=b.size;
        }
      b.procID = item.procID;
      m_toMe.push_back(b);
    }
  sort(m_toMe.begin(), m_toMe.end());

  if(T::preAllocatable() == 2) // dynamically allocatable: need two passes
    {
      // In the non-preallocatable case, the values of m_toMe[*].size must
      // be communicated from the senders before receive buffers can be sized.
      if(m_fromMe.size() > 0)
        {
          MPI_Request nullrequest;

          int lastProc = -1;
          int messageIndex = 0;
          for(int i=0; i<m_fromMe.size(); ++i)
            {
              bufEntry& b = m_fromMe[i];
              if(b.procID == lastProc) messageIndex++;
              else                     messageIndex = 0;
              lastProc = b.procID;
              MPI_Isend(&(b.size), 1, MPI_INT, b.procID,
                        messageIndex, Chombo_MPI::comm, &(nullrequest));
              MPI_Request_free(&(nullrequest));  // we don't wait on these sends: b.size
              //    lives in m_fromMe and stays put, and most MPI implementations
              //    fast-path a single-integer message.
            }
        }
      if(m_toMe.size() > 0)
        {
          m_receiveRequests = new MPI_Request[m_toMe.size()];
          m_receiveStatus   = new MPI_Status[m_toMe.size()];
          int lastProc = -1;
          int messageIndex = 0;
          for(int i=0; i<m_toMe.size(); ++i)
            {
              bufEntry& b = m_toMe[i];
              if(b.procID == lastProc) messageIndex++;
              else                     messageIndex = 0;
              lastProc = b.procID;
              MPI_Irecv(&(b.size), 1, MPI_INT, b.procID,
                        messageIndex, Chombo_MPI::comm, m_receiveRequests+i);
            }

          int result = MPI_Waitall(m_toMe.size(), m_receiveRequests, m_receiveStatus);
          if(result != MPI_SUCCESS)
            {
              MayDay::Error("First pass of two-phase communication failed");
            }
          for(int i=0; i<m_toMe.size(); ++i)  recBufferSize+= m_toMe[i].size;
          delete[] m_receiveRequests;
          delete[] m_receiveStatus;
        }
    }

  // allocate send and receive buffer space.

  if(sendBufferSize > m_sendcapacity)
    {
      free(m_sendbuffer);
      m_sendbuffer = malloc(sendBufferSize);
      if(m_sendbuffer == NULL)
        {
          MayDay::Error("Out of memory in LevelData::allocateBuffers");
        }
      m_sendcapacity = sendBufferSize;
    }

  if(recBufferSize > m_reccapacity)
    {
      free(m_recbuffer);
      m_recbuffer = malloc(recBufferSize);
      if(m_recbuffer == NULL)
        {
          MayDay::Error("Out of memory in LevelData::allocateBuffers");
        }
      m_reccapacity = recBufferSize;
    }

  /*
  pout()<<"\n";
  for(int i=0; i<m_fromMe.size(); i++)
    pout()<<m_fromMe[i].item->region<<"{"<<m_fromMe[i].procID<<"}"<<" ";
  pout() <<"::::";
  for(int i=0; i<m_toMe.size(); i++)
    pout()<<m_toMe[i].item->region<<"{"<<m_toMe[i].procID<<"}"<<" ";
  pout() << endl;
  */

  char* nextFree = (char*)m_sendbuffer;
  if(m_fromMe.size() > 0)
  {
    for(unsigned int i=0; i<m_fromMe.size(); ++i)
      {
        m_fromMe[i].bufPtr = nextFree;
        nextFree += m_fromMe[i].size;
      }
  }

  nextFree = (char*)m_recbuffer;
  if(m_toMe.size() > 0)
  {
    for(unsigned int i=0; i<m_toMe.size(); ++i)
      {
        m_toMe[i].bufPtr = nextFree;
        nextFree += m_toMe[i].size;
      }
  }

  // since fromMe and toMe are sorted based on procID, messages can now be
  // grouped together on a per-processor basis.
}


template<class T> inline
void LevelData<T>::writeSendDataFromMeIntoBuffers(const LevelData<T>& a_src,
                                                  const Interval&     a_srcComps) const
{
  for(unsigned int i=0; i<m_fromMe.size(); ++i)
    {
      const bufEntry& entry = m_fromMe[i];
      a_src[entry.item->fromIndex].linearOut(entry.bufPtr, entry.item->fromRegion, a_srcComps);
    }
}

template<class T> inline
void LevelData<T>::postSendsFromMe() const
{
  // Here we get the benefit of message coalescence: m_fromMe was sorted by
  // procID in allocateBuffers(), so consecutive entries destined for the
  // same processor are merged into a single send.

  numSends = m_fromMe.size();
  if(numSends > 1){
    for(unsigned int i=m_fromMe.size()-1; i>0; --i)
      {
        if(m_fromMe[i].procID == m_fromMe[i-1].procID)
          {
            numSends--;
            m_fromMe[i-1].size += m_fromMe[i].size;
            m_fromMe[i].size = 0;
          }
      }
  }
  m_sendRequests = new MPI_Request[numSends];
  m_sendStatus = new MPI_Status[numSends];

  unsigned int next=0;
  for(int i=0; i<numSends; ++i)
    {
      const bufEntry& entry = m_fromMe[next];
      // cout<<procID()<< ": sending message of "<<entry.size;
      // cout<<" to proc "<<  entry.procID<<endl;
      MPI_Isend(entry.bufPtr, entry.size, MPI_BYTE, entry.procID,
                0, Chombo_MPI::comm, m_sendRequests+i);
      ++next;
      while(next < m_fromMe.size() && m_fromMe[next].size == 0) ++next;
    }
}
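
// A small worked example of the coalescence above (illustrative numbers):
// suppose m_fromMe, after sorting by procID, holds entries of sizes
//   {proc 1, 400}, {proc 1, 240}, {proc 3, 512}
// The backward sweep folds the second entry into the first, giving
//   {proc 1, 640}, {proc 1, 0}, {proc 3, 512}
// so numSends == 2, and the zero-sized entry is skipped when the MPI_Isend
// calls are posted.  Because buffer regions were handed out in the same
// sorted order in allocateBuffers(), the 640 bytes for proc 1 are
// contiguous and go out as one message.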

template<class T> inline
void LevelData<T>::postReceivesToMe() const
{
  numReceives = m_toMe.size();

  if(numReceives > 1){
    for(unsigned int i=m_toMe.size()-1; i>0; --i)
      {
        if(m_toMe[i].procID == m_toMe[i-1].procID)
          {
            numReceives--;
            m_toMe[i-1].size += m_toMe[i].size;
            m_toMe[i].size = 0;
          }
      }
  }
  m_receiveRequests = new MPI_Request[numReceives];
  m_receiveStatus = new MPI_Status[numReceives];

  unsigned int next=0;
  for(int i=0; i<numReceives; ++i)
    {
      const bufEntry& entry = m_toMe[next];
      //cout<<procID()<< ": receiving message of "<<entry.size;
      //cout<<" from proc "<<  entry.procID<<endl;
      MPI_Irecv(entry.bufPtr, entry.size, MPI_BYTE, entry.procID,
                0, Chombo_MPI::comm, m_receiveRequests+i);
      ++next;
      while(next < m_toMe.size() && m_toMe[next].size == 0) ++next;
    }
}


template<class T> inline
void LevelData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest,
                                      const Interval&   a_destComps) const
{
  if(numReceives > 0){
    int result = MPI_Waitall(numReceives, m_receiveRequests, m_receiveStatus);
    if(result != MPI_SUCCESS)
      {
        // it is not clear what to do about failed messaging here
      }

    for(unsigned int i=0; i<m_toMe.size(); ++i)
      {
        const bufEntry& entry = m_toMe[i];
        a_dest[entry.item->toIndex].linearIn(entry.bufPtr, entry.item->toRegion, a_destComps);
      }

    delete[] m_receiveRequests;
    delete[] m_receiveStatus;
  }
  numReceives = 0;
}

#endif

Generated on Wed Apr 16 14:31:05 2003 for EBChombo by doxygen 1.2.16