Main Page | Modules | Namespace List | Class Hierarchy | Alphabetical List | Class List | Directories | File List | Class Members | File Members

BoxLayoutDataI.H

Go to the documentation of this file.
00001 /*   _______              __
00002     / ___/ /  ___  __ _  / /  ___
00003    / /__/ _ \/ _ \/  V \/ _ \/ _ \
00004    \___/_//_/\___/_/_/_/_.__/\___/
00005 */
00006 // CHOMBO Copyright (c) 2000-2004, The Regents of the University of
00007 // California, through Lawrence Berkeley National Laboratory (subject to
00008 // receipt of any required approvals from U.S. Dept. of Energy).  All
00009 // rights reserved.
00010 //
00011 // Redistribution and use in source and binary forms, with or without
00012 // modification, are permitted provided that the following conditions are met:
00013 //
00014 // (1) Redistributions of source code must retain the above copyright
00015 // notice, this list of conditions and the following disclaimer.
00016 // (2) Redistributions in binary form must reproduce the above copyright
00017 // notice, this list of conditions and the following disclaimer in the
00018 // documentation and/or other materials provided with the distribution.
00019 // (3) Neither the name of Lawrence Berkeley National Laboratory, U.S.
00020 // Dept. of Energy nor the names of its contributors may be used to endorse
00021 // or promote products derived from this software without specific prior
00022 // written permission.
00023 //
00024 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00025 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
00026 // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
00027 // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
00028 // OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
00029 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
00030 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00031 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00032 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00033 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00034 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00035 //
00036 // You are under no obligation whatsoever to provide any bug fixes,
00037 // patches, or upgrades to the features, functionality or performance of
00038 // the source code ("Enhancements") to anyone; however, if you choose to
00039 // make your Enhancements available either publicly, or directly to
00040 // Lawrence Berkeley National Laboratory, without imposing a separate
00041 // written license agreement for such Enhancements, then you hereby grant
00042 // the following license: a non-exclusive, royalty-free perpetual license
00043 // to install, use, modify, prepare derivative works, incorporate into
00044 // other computer software, distribute, and sublicense such Enhancements or
00045 // derivative works thereof, in binary and source code form.
00046 //
00047 // TRADEMARKS. Product and company names mentioned herein may be the
00048 // trademarks of their respective owners.  Any rights not expressly granted
00049 // herein are reserved.
00050 //
00051 
00052 #ifndef _BOXLAYOUTDATAI_H_
00053 #define _BOXLAYOUTDATAI_H_
00054 
00055 #include <cstdlib>
00056 #include <algorithm>
00057 #include <limits.h>
00058 
00059 #include "parstream.H"
00060 #include "memtrack.H"
00061 
00062 using std::sort;
00063 
// makeItSo: the core communication/copy engine.  Moves components
// a_srcComps of a_src into components a_destComps of a_dest according to
// the motion pattern held by a_copier, applying a_op to (de)serialize each
// box of data.  Sequence: (re)build message buffers if needed, pack the
// outgoing data, post non-blocking receives and sends, perform the purely
// local copies while messages are in flight, then wait for sends and
// unpack the receives.
template<class T>
void BoxLayoutData<T>::makeItSo(const Interval&   a_srcComps,
                            const BoxLayoutData<T>& a_src,
                            BoxLayoutData<T>& a_dest,
                            const Interval&   a_destComps,
                            const Copier&     a_copier,
                            const LDOperator<T>& a_op) const
{
  // The following five functions are nullOps in uniprocessor mode

  // Instead of doing this here, do it an end of makeItSo (ndk)
  //completePendingSends(); // wait for sends from possible previous operation

  // new evil logic to determine under what conditions we can just
  // re-use our messaging pattern and buffers from the last call to
  // makeItSo.  pretty elaborate, I know.  bvs
#ifdef CH_MPI
  // NOTE(review): function-level 'static' caches the previous Copier across
  // calls from *any* BoxLayoutData instance — not thread-safe.
  static Copier* lastCopier=NULL;

#ifndef NDEBUG
  //verifyCommunications();
#endif

  // Rebuild the buffers when: T's linear size cannot be precomputed
  // (preAllocatable() == 2), this copier has never had buffers allocated,
  // no message queues exist yet, or a different copier was used last time.
  if(T::preAllocatable() == 2 || !a_copier.bufferAllocated() ||
     (m_fromMe.size() + m_toMe.size() == 0) ||lastCopier != &a_copier){
    allocateBuffers(a_src,  a_srcComps,
                    a_dest, a_destComps,
                    a_copier,
                    a_op);  //monkey with buffers, set up 'fromMe' and 'toMe' queues
    a_copier.setBufferAllocated(true);
  }
  lastCopier = (Copier*)(&a_copier);

#endif

  // Serialize all outgoing data into the (already sized) send buffers.
  writeSendDataFromMeIntoBuffers(a_src, a_srcComps, a_op);

  // If there is nothing to recv/send, don't go into these functions
  // and allocate memory that will not be freed later.  (ndk)
  // The #ifdef CH_MPI is for the m_toMe and m_fromMe
#ifdef CH_MPI
  this->numReceives = m_toMe.size();
  if (this->numReceives > 0) {
    postReceivesToMe(); // all non-blocking
  }

  this->numSends = m_fromMe.size();
  if (this->numSends > 0) {
    postSendsFromMe();  // all non-blocking
  }
#endif

  //  computation that could occur during communication should really
  //  go here somehow.  while all the non-blocking sending and receiving is
  //  going on.
  //
  //  my thought is to return from this function at this point an object
  //  that encapsulates the argument list above.
  //  a "ChomboMessaging" object.
  //  The user can keep a reference
  //  to this object and do computations.  When they reach the limit of what
  //  they can compute without this communication completing, they call the
  //  "finalize()" function of their ChomboMessaging object and the rest of this
  //  code below gets executed.
  //  a real question though is: is there really enough computation to do while
  //  messaging is going on to justify the effort, and what machines really have
  //  good asynchronous messaging to make the work worthwhile.
  //
  //  the other approach is to more finely decompose the overlapping of
  //  messaging and computation by using the ChomboMessaging object in the
  //  DataIterator construction.  The DataIterator returns T objects as they
  //  are completed from messaging.  This preserves almost all of the Chombo
  //  code as is but would be mucho tricky to actually implement and might only
  //  gain little.  This would not be a thing to try until Chombo is
  //  heavily instrumented for performance measuring.  in this design, unpackReceivesToMe()
  //  would become a complicated process interwoven with a DataIterator.

  //  postReceivesToMe();

  // perform local copy (overlapped with the in-flight messages above)
  for(CopyIterator it(a_copier, CopyIterator::LOCAL); it.ok(); ++it)
    {
      const MotionItem& item = it();
      a_op.op(a_dest[item.toIndex], item.fromRegion,
              a_destComps,
              item.toRegion,
              a_src[item.fromIndex],
              a_srcComps);
    }

  // Uncomment and Move this out of unpackReceivesToMe()  (ndk)
  completePendingSends(); // wait for sends from possible previous operation

  unpackReceivesToMe(a_dest, a_destComps, a_op); // nullOp in uniprocessor mode

}
00160 
00161 #ifndef CH_MPI
// uniprocessor version of all these nullop functions.
// Without MPI there is no remote data motion: makeItSo() reduces to its
// purely local copy loop, so every communication hook below is a no-op.
template<class T>
void BoxLayoutData<T>::completePendingSends() const
{}

// No buffers are needed when nothing is sent or received.
template<class T>
void BoxLayoutData<T>::allocateBuffers(const BoxLayoutData<T>& a_src,
                                   const Interval& a_srcComps,
                                   const BoxLayoutData<T>& a_dest,
                                   const Interval& a_destComps,
                                   const Copier&   a_copier,
                                   const LDOperator<T>& a_op
                                   ) const
{}

// Nothing to serialize in serial mode.
template<class T>
void BoxLayoutData<T>::writeSendDataFromMeIntoBuffers(const BoxLayoutData<T>& a_src,
                                                  const Interval&     a_srcComps,
                                                  const LDOperator<T>& a_op) const
{}

template<class T>
void BoxLayoutData<T>::postSendsFromMe() const
{}

template<class T>
void BoxLayoutData<T>::postReceivesToMe() const
{}

// Nothing to deserialize in serial mode.
template<class T>
void BoxLayoutData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest,
                                      const Interval&   a_destComps,
                                      const LDOperator<T>& a_op) const
{}

template<class T>
void BoxLayoutData<T>::unpackReceivesToMe_append(LayoutData<Vector<RefCountedPtr<T> > >& a_dest,
                                                 const Interval&   a_destComps,
                                                 int ncomp,
                                                 const DataFactory<T>& factory,
                                                 const LDOperator<T>& a_op) const
{}
00204 
00205 #else
00206 
00207 // MPI versions of the above codes.
00208 
00209 template<class T>
00210 void BoxLayoutData<T>::completePendingSends() const
00211 {
00212   if(this->numSends > 0){
00213     int result = MPI_Waitall(this->numSends, m_sendRequests, m_sendStatus);
00214     if(result != MPI_SUCCESS)
00215       {
00216         //hell if I know what to do about failed messaging here
00217       }
00218 
00219     delete[] m_sendRequests;
00220     delete[] m_sendStatus;
00221   }
00222   this->numSends = 0;
00223 }
00224 
00225 
// Builds (or rebuilds) the messaging metadata for one communication
// pattern: fills the m_fromMe/m_toMe queues from the copier's motion
// items, computes total send/receive buffer sizes, grows the flat buffers
// if needed, and assigns each queue entry its slice of buffer space.
// For types whose linear size cannot be computed from local data
// (T::preAllocatable() == 2) an extra message round (MPI tag 1) exchanges
// the sizes before the buffers are carved up.
template<class T>
void BoxLayoutData<T>::allocateBuffers(const BoxLayoutData<T>& a_src,
                                   const Interval& a_srcComps,
                                   const BoxLayoutData<T>& a_dest,
                                   const Interval& a_destComps,
                                   const Copier&   a_copier,
                                   const LDOperator<T>& a_op) const
{
  m_fromMe.resize(0);
  m_toMe.resize(0);
  size_t sendBufferSize = 0;
  size_t recBufferSize  = 0;
  // two versions of code here.  one for preAllocatable T, one not.

  T dummy;
  // Outgoing queue: one entry per motion item whose data leaves this
  // processor.  Send sizes can always be computed from the local source.
  for(CopyIterator it(a_copier, CopyIterator::FROM); it.ok(); ++it)
    {
      const MotionItem& item = it();
      bufEntry b;
      b.item = &item;
      b.size = a_op.size(a_src[item.fromIndex], item.fromRegion, a_srcComps);
      sendBufferSize+=b.size;
      b.procID = item.procID;
      m_fromMe.push_back(b);
    }
  // Sorting groups entries by processor so messages can be coalesced later.
  sort(m_fromMe.begin(), m_fromMe.end());
  // Incoming queue.  Receive sizes are computable locally only for
  // preAllocatable() == 0 (from a dummy object) or 1 (from the existing
  // destination data); case 2 leaves b.size unset until the exchange below.
  for(CopyIterator it(a_copier, CopyIterator::TO); it.ok(); ++it)
    {
      const MotionItem& item = it();
      bufEntry b;
      b.item = &item;
      if(T::preAllocatable() == 0)
        {
          b.size = a_op.size(dummy, item.toRegion, a_destComps);
          recBufferSize+=b.size;
        }
      else if (T::preAllocatable() == 1)
        {
          b.size = a_op.size(a_dest[item.toIndex], item.toRegion, a_destComps);
          recBufferSize+=b.size;
        }
      b.procID = item.procID;
      m_toMe.push_back(b);
    }
  sort(m_toMe.begin(), m_toMe.end());

  if(T::preAllocatable() == 2) // dynamic allocatable, need two pass
    {
      // in the non-preallocatable case, I need to message the
      // values for the m_toMe[*].size
      Vector<unsigned long> fdata;
      Vector<unsigned long> tdata;
      // count/scount track how many per-processor messages are actually
      // posted (one per contiguous run of equal procIDs), starting at 1
      // for the trailing message posted after each loop.
      int count = 1;
      int scount = 1;
      if(m_toMe.size() > 0)
        {
          // ULONG_MAX is a sentinel: any slot still holding it after the
          // waitall below was never filled by a message.
          tdata.resize(m_toMe.size(), ULONG_MAX);
          // At most numProc()-1 distinct remote senders.
          m_receiveRequests = new MPI_Request[numProc()-1];
          m_receiveStatus   = new MPI_Status [numProc()-1];
          MPI_Request* Rptr = m_receiveRequests;

          int lastProc = m_toMe[0].procID;
          int messageSize = 1;
          unsigned long * dataPtr = &(tdata[0]);
          int i = 1;

          // Walk the sorted queue; each time the procID changes, post one
          // receive (tag 1) covering the whole run for the previous proc.
          for(;i<m_toMe.size(); ++i)
            {
              bufEntry& b = m_toMe[i];
              if(b.procID == lastProc)
                messageSize++;
              else
                {

                  MPI_Irecv(dataPtr, messageSize, MPI_UNSIGNED_LONG, lastProc,
                            1, Chombo_MPI::comm, Rptr);
                  Rptr++;

                  lastProc = b.procID;
                  messageSize = 1;
                  dataPtr = &(tdata[i]);
                  count++;
                }
            }

          // Post the receive for the final run.
          MPI_Irecv(dataPtr, messageSize, MPI_UNSIGNED_LONG, lastProc,
                    1, Chombo_MPI::comm, Rptr );

        }
      if(m_fromMe.size() > 0)
        {
          // Mirror image of the receive side: ship our send sizes to each
          // destination processor, one message per run of equal procIDs.
          fdata.resize(m_fromMe.size());
          fdata[0]=m_fromMe[0].size;
          m_sendRequests = new MPI_Request[numProc()-1];
          m_sendStatus   = new MPI_Status [numProc()-1];
          MPI_Request* Rptr = m_sendRequests;

          int lastProc = m_fromMe[0].procID;
          int messageSize = 1;
          unsigned long * dataPtr = &(fdata[0]);
          int i = 1;
          for(;i<m_fromMe.size(); ++i)
            {
              fdata[i]    = m_fromMe[i].size;
              bufEntry& b = m_fromMe[i];
              if(b.procID == lastProc)
                messageSize++;
              else
                {

                  MPI_Isend(dataPtr, messageSize, MPI_UNSIGNED_LONG, lastProc,
                            1, Chombo_MPI::comm, Rptr);

                  Rptr++;
                  lastProc = b.procID;
                  messageSize = 1;
                  dataPtr = &(fdata[i]);
                  scount++;
                }
            }

          // Post the send for the final run.
          MPI_Isend(dataPtr, messageSize, MPI_UNSIGNED_LONG, lastProc,
                    1, Chombo_MPI::comm, Rptr);

        }

      if(m_toMe.size() > 0)
        {
          // Wait for all size messages, then fill in the receive sizes.
          int result = MPI_Waitall(count, m_receiveRequests, m_receiveStatus);
          if(result != MPI_SUCCESS)
            {
              MayDay::Error("First pass of two-phase communication failed");
            }

          for(int i=0; i<m_toMe.size(); ++i) {
            CH_assert(tdata[i] != ULONG_MAX);  // every slot must have been messaged
            m_toMe[i].size = tdata[i];
            recBufferSize+= tdata[i];
          }

          delete[] m_receiveRequests;
          delete[] m_receiveStatus;
        }
      if(m_fromMe.size() > 0)
        {
          // fdata must stay alive until these sends complete, hence the
          // waitall before this function returns.
          int result = MPI_Waitall(scount, m_sendRequests, m_sendStatus);
          if(result != MPI_SUCCESS)
            {
              MayDay::Error("First pass of two-phase communication failed");
            }
          delete[] m_sendRequests;
          delete[] m_sendStatus;
        }
    }

  // allocate send and receive buffer space.
  // Capacity is cached: buffers only grow, they never shrink, so repeated
  // calls with the same (or smaller) pattern reuse the old allocation.

  if(sendBufferSize > m_sendcapacity)
    {
      free(m_sendbuffer);
      m_sendbuffer = malloc(sendBufferSize);
      if(m_sendbuffer == NULL)
        {
          MayDay::Error("Out of memory in BoxLayoutData::allocatebuffers");
        }
      m_sendcapacity = sendBufferSize;
    }

  if(recBufferSize > m_reccapacity)
    {
      free(m_recbuffer);
      m_recbuffer = malloc(recBufferSize);
      if(m_recbuffer == NULL)
        {
          MayDay::Error("Out of memory in BoxLayoutData::allocatebuffers");
        }
      m_reccapacity = recBufferSize;
    }

  /*
    pout()<<"\n";
    for(int i=0; i<m_fromMe.size(); i++)
    pout()<<m_fromMe[i].item->region<<"{"<<m_fromMe[i].procID<<"}"<<" ";
    pout() <<"::::";
    for(int i=0; i<m_toMe.size(); i++)
    pout()<<m_toMe[i].item->region<<"{"<<m_toMe[i].procID<<"}"<<" ";
    pout() << endl;
  */

  // Carve the flat send buffer into per-entry slices, in sorted order.
  char* nextFree = (char*)m_sendbuffer;
  if(m_fromMe.size() > 0)
    {
      for(unsigned int i=0; i<m_fromMe.size(); ++i)
        {
          m_fromMe[i].bufPtr = nextFree;
          nextFree += m_fromMe[i].size;
        }
    }

  // Same carving for the receive buffer.
  nextFree = (char*)m_recbuffer;
  if(m_toMe.size() > 0)
    {
      for(unsigned int i=0; i<m_toMe.size(); ++i)
        {
          m_toMe[i].bufPtr = nextFree;
          nextFree += m_toMe[i].size;
        }
    }

  // since fromMe and toMe are sorted based on procID, messages can now be grouped
  // together on a per-processor basis.

}
00444 
00445 template<class T>
00446 void BoxLayoutData<T>::writeSendDataFromMeIntoBuffers(const BoxLayoutData<T>& a_src,
00447                                                   const Interval&     a_srcComps,
00448                                                   const LDOperator<T>& a_op) const
00449 {
00450   for(unsigned int i=0; i<m_fromMe.size(); ++i)
00451     {
00452       const bufEntry& entry = m_fromMe[i];
00453       a_op.linearOut(a_src[entry.item->fromIndex], entry.bufPtr,
00454                      entry.item->fromRegion, a_srcComps);
00455     }
00456 }
00457 
// Post one non-blocking MPI_Isend (tag 0) per destination processor.
// Relies on m_fromMe being sorted by procID (done in allocateBuffers()).
template<class T>
void BoxLayoutData<T>::postSendsFromMe() const
{

  // now we get the magic of message coalescence
  // fromMe has already been sorted in the allocateBuffers() step.

  this->numSends = m_fromMe.size();
  // Coalesce adjacent entries bound for the same processor: fold each
  // entry's size into its predecessor and zero it out, so a single send
  // covers the whole contiguous run of buffer space.
  if(this->numSends > 1){
    for(unsigned int i=m_fromMe.size()-1; i>0; --i)
      {
        if(m_fromMe[i].procID == m_fromMe[i-1].procID)
          {
            this->numSends--;
            m_fromMe[i-1].size+=m_fromMe[i].size;
            m_fromMe[i].size = 0;
          }
      }
  }
  // Freed later by completePendingSends().
  m_sendRequests = new MPI_Request[this->numSends];
  m_sendStatus = new MPI_Status[this->numSends];

  unsigned int next=0;
  for(int i=0; i<this->numSends; ++i)
    {
      const bufEntry& entry = m_fromMe[next];
      // cout<<procID()<< ": sending message of "<<entry.size;
      // cout<<" to proc "<<  entry.procID<<endl;
      MPI_Isend(entry.bufPtr, entry.size, MPI_BYTE, entry.procID,
                0, Chombo_MPI::comm, m_sendRequests+i);
      ++next;
      // skip entries whose size was folded into a predecessor above
      while(next < m_fromMe.size() && m_fromMe[next].size == 0) ++next;
    }
}
00492 
// Post one non-blocking MPI_Irecv (tag 0) per source processor.  Mirror
// image of postSendsFromMe(); relies on m_toMe being sorted by procID.
template<class T>
void BoxLayoutData<T>::postReceivesToMe() const
{
  this->numReceives = m_toMe.size();

  // Coalesce adjacent entries from the same processor into one receive
  // covering the contiguous run of buffer space (see postSendsFromMe()).
  if(this->numReceives > 1){
    for(unsigned int i=m_toMe.size()-1; i>0; --i)
      {
        if(m_toMe[i].procID == m_toMe[i-1].procID)
          {
            this->numReceives--;
            m_toMe[i-1].size+=m_toMe[i].size;
            m_toMe[i].size = 0;
          }
      }
  }
  // Freed later by unpackReceivesToMe()/unpackReceivesToMe_append().
  m_receiveRequests = new MPI_Request[this->numReceives];
  m_receiveStatus = new MPI_Status[this->numReceives];

  unsigned int next=0;
  for(int i=0; i<this->numReceives; ++i)
    {
      const bufEntry& entry = m_toMe[next];
      //cout<<procID()<< ": receiving message of "<<entry.size;
      //cout<<" from proc "<<  entry.procID<<endl;
      MPI_Irecv(entry.bufPtr, entry.size, MPI_BYTE, entry.procID,
                0, Chombo_MPI::comm, m_receiveRequests+i);
      ++next;
      // skip entries whose size was folded into a predecessor above
      while(next < m_toMe.size() && m_toMe[next].size == 0) ++next;
    }

}
00525 
// Wait for all receives posted by postReceivesToMe() to complete, then
// deserialize each incoming box of data into a_dest via a_op.linearIn.
template<class T>
void BoxLayoutData<T>::unpackReceivesToMe(BoxLayoutData<T>& a_dest,
                                      const Interval&   a_destComps,
                                      const LDOperator<T>& a_op) const
{

  if(this->numReceives > 0){
    int result = MPI_Waitall(this->numReceives, m_receiveRequests, m_receiveStatus);
    if(result != MPI_SUCCESS)
      {
        // no recovery strategy for failed messaging; error is ignored
      }

    // Note: iterate over the full m_toMe queue, not just numReceives —
    // coalesced entries with size 0 still carry their own bufPtr slice.
    for(unsigned int i=0; i<m_toMe.size(); ++i)
      {
        const bufEntry& entry = m_toMe[i];
        a_op.linearIn(a_dest[entry.item->toIndex], entry.bufPtr, entry.item->toRegion, a_destComps);
      }

    delete[] m_receiveRequests;
    delete[] m_receiveStatus;
  }
  this->numReceives = 0;
}
00550 
// Variant of unpackReceivesToMe() used by generalCopyTo(): instead of
// copying into existing data holders, a fresh T of ncomp components is
// created (via factory) for every incoming region and appended to the
// destination Vector for that box.
template<class T>
void BoxLayoutData<T>::unpackReceivesToMe_append(LayoutData<Vector<RefCountedPtr<T> > >& a_dest,
                                                 const Interval&   a_destComps,
                                                 int ncomp,
                                                 const DataFactory<T>& factory,
                                                 const LDOperator<T>& a_op) const
{

  if(this->numReceives > 0){
    int result = MPI_Waitall(this->numReceives, m_receiveRequests, m_receiveStatus);
    if(result != MPI_SUCCESS)
      {
        // no recovery strategy for failed messaging; error is ignored
      }

    for(unsigned int i=0; i<m_toMe.size(); ++i)
      {
        const bufEntry& entry = m_toMe[i];
        const MotionItem& item = *(entry.item);
        // Create a new holder sized for the incoming region, fill it from
        // the receive buffer, and hand ownership to the destination Vector.
        RefCountedPtr<T> newT = factory.create(item.toRegion, ncomp, item.toIndex);

        a_op.linearIn(*newT, entry.bufPtr, item.toRegion, a_destComps);
        a_dest[item.toIndex].push_back(newT);
      }

    delete[] m_receiveRequests;
    delete[] m_receiveStatus;
  }
  this->numReceives = 0;
}
00582 #endif
00583 
00584 template <class T>
00585 void BoxLayoutData<T>::generalCopyTo(const BoxLayout& a_destGrids,
00586                                      LayoutData<Vector<RefCountedPtr<T> > >& a_dest,
00587                                      const Interval& a_srcComps,
00588                                      const ProblemDomain& a_domain,
00589                                      const DataFactory<T>& factory) const
00590 {
00591 
00592  CH_assert(T::preAllocatable() == 0);
00593   LDOperator<T> a_op;
00594 
00595   a_dest.define(a_destGrids);
00596   Copier copier;
00597   copier.define(this->m_boxLayout, a_destGrids, a_domain, IntVect::Zero);
00598 
00599   int ncomp = a_srcComps.size();
00600   Interval destComps(0, ncomp-1);
00601   allocateBuffers(*this,  a_srcComps,
00602                   *this,  destComps,
00603                   copier, a_op);
00604 
00605   writeSendDataFromMeIntoBuffers(*this, a_srcComps, a_op);
00606 
00607   // If there is nothing to recv/send, don't go into these functions
00608   // and allocate memory that will not be freed later.  (ndk)
00609   // The #ifdef CH_MPI is for the m_toMe and m_fromMe
00610 #ifdef CH_MPI
00611   this->numReceives = m_toMe.size();
00612   if (this->numReceives > 0) {
00613     postReceivesToMe(); // all non-blocking
00614   }
00615 
00616   this->numSends = m_fromMe.size();
00617   if (this->numSends > 0) {
00618     postSendsFromMe();  // all non-blocking
00619   }
00620 #endif
00621 
00622     // perform local copy
00623   for(CopyIterator it(copier, CopyIterator::LOCAL); it.ok(); ++it)
00624     {
00625       const MotionItem& item = it();
00626       RefCountedPtr<T> newT = factory.create(item.toRegion, ncomp, item.toIndex);
00627 
00628       a_op.op(*newT, item.fromRegion,
00629               destComps,
00630               item.toRegion,
00631               this->operator[](item.fromIndex),
00632               a_srcComps);
00633       a_dest[item.toIndex].push_back(newT);
00634     }
00635 
00636   // Uncomment and Move this out of unpackReceivesToMe()  (ndk)
00637   completePendingSends(); // wait for sends from possible previous operation
00638 
00639   unpackReceivesToMe_append(a_dest, destComps, ncomp, factory, a_op); // nullOp in uniprocessor mode
00640 }
00641 
00642 #endif

Generated on Wed Oct 5 13:52:08 2005 for Chombo&AMRSelfGravity by  doxygen 1.4.1