@@ -585,6 +585,7 @@ void MPMesh::startCommunication(){
                               elm2global);

  // Do Map of Global To Local ID
+  // TODO: consider an ordered map; benchmark which is faster
  std::unordered_map<int, int> global2local;
  for (int iEnt = 0; iEnt < numElements; iEnt++) {
    int globalID = elm2global_host(iEnt);
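The TODO added in this hunk weighs an ordered std::map against the std::unordered_map used for the global-to-local lookup. Below is a minimal, self-contained sketch of the hash-map variant, not taken from the repository; it uses a std::vector as a stand-in for the mirrored elm2global_host view and adds a reserve() call so the insert loop never rehashes.

#include <unordered_map>
#include <vector>

// Sketch only: build a globalID -> localID map from a host-side list of global IDs.
// std::unordered_map gives average O(1) lookups; std::map would give O(log n)
// lookups but ordered iteration, which only matters if the map is later walked by key.
std::unordered_map<int, int> buildGlobal2Local(const std::vector<int>& elm2global) {
  std::unordered_map<int, int> global2local;
  global2local.reserve(elm2global.size());  // pre-size to avoid rehashing during inserts
  for (int iEnt = 0; iEnt < static_cast<int>(elm2global.size()); iEnt++) {
    global2local[elm2global[iEnt]] = iEnt;
  }
  return global2local;
}

Whichever container wins the benchmark, the lookup side of the exchange below only needs operator[] or find(), so swapping the container type later stays a local change.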
@@ -602,22 +603,30 @@ void MPMesh::startCommunication(){
  // # Owner Entities which are halo entities somewhere else
  MPI_Alltoall(numOwnersOnOtherProcs.data(), 1, MPI_INT, numHalosOnOtherProcs.data(), 1, MPI_INT, comm);

-  // Send Halo Entity Global Id To Owning Process
+  // Send Halo Entity Global & Local Id To Owning Process
  std::vector<std::vector<int>> sendBufs(numProcsTot);
+  for (int proc = 0; proc < numProcsTot; proc++)
+    sendBufs[proc].reserve(2 * numOwnersOnOtherProcs[proc]);
+
  for (int iEnt = numOwnersTot; iEnt < numOwnersTot + numHalosTot; iEnt++) {
    auto ownerProc = elmOwners_host(iEnt);
    assert(ownerProc != self);
    sendBufs[ownerProc].push_back(elm2global_host(iEnt));
+    sendBufs[ownerProc].push_back(iEnt);
  }
-  // Requests
+
+  MPI_Barrier(comm);
+  printf("Done 0\n");
+  // Requests
  std::vector<MPI_Request> requests;
+  requests.reserve(2 * numProcsTot);
  // Receive Calls
  std::vector<std::vector<int>> recvBufs(numProcsTot);
  for (int proc = 0; proc < numProcsTot; proc++) {
    if (numHalosOnOtherProcs[proc] > 0) {
-      recvBufs[proc].resize(numHalosOnOtherProcs[proc]);
+      recvBufs[proc].resize(2 * numHalosOnOtherProcs[proc]);
      MPI_Request req;
-      MPI_Irecv(recvBufs[proc].data(), numHalosOnOtherProcs[proc], MPI_INT, proc, MPI_ANY_TAG, comm, &req);
+      MPI_Irecv(recvBufs[proc].data(), 2 * numHalosOnOtherProcs[proc], MPI_INT, proc, MPI_ANY_TAG, comm, &req);
      requests.push_back(req);
    }
  }
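With this change each message to an owner carries interleaved (globalID, halo local index) pairs instead of bare global IDs, which is why the reserve(), resize(), and MPI_Irecv counts above are all doubled. The sketch below shows that pack/unpack convention in isolation; it is not code from the repository, and HaloRef, packPairs, and unpackPairs are illustrative names.

#include <cstddef>
#include <vector>

struct HaloRef {
  int globalID;     // global element ID of the halo copy
  int haloLocalID;  // local index of that copy on the halo rank
};

// Pack pairs as [g0, l0, g1, l1, ...] so one MPI_INT buffer carries both values.
std::vector<int> packPairs(const std::vector<HaloRef>& refs) {
  std::vector<int> buf;
  buf.reserve(2 * refs.size());
  for (const auto& r : refs) {
    buf.push_back(r.globalID);
    buf.push_back(r.haloLocalID);
  }
  return buf;
}

// Unpack on the owner; buf.size() is twice the number of halo copies that rank holds.
std::vector<HaloRef> unpackPairs(const std::vector<int>& buf) {
  std::vector<HaloRef> refs(buf.size() / 2);
  for (std::size_t i = 0; i < refs.size(); i++) {
    refs[i] = {buf[2 * i], buf[2 * i + 1]};
  }
  return refs;
}

Because the layout is plain ints, no MPI derived datatype is needed; the owner simply strides by two when reading the buffer, as the later hunk does with recvBufs[proc][i * 2].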
@@ -632,6 +641,8 @@ void MPMesh::startCommunication(){
  MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
  requests.clear();

+  MPI_Barrier(comm);
+  printf("Done 1\n");
  // Now the owner process needs to look at these globalIDs, convert them to localIDs, and send them back
  // recvBufs[p] contains the global IDs of the elements that halo rank p needs
  // numHalosOnOtherProcs[p] tells how many to expect from proc p
@@ -640,12 +651,19 @@ void MPMesh::startCommunication(){
    if (numHalosOnOtherProcs[proc] > 0) {
      localIDBufs[proc].resize(numHalosOnOtherProcs[proc]);
      for (int i = 0; i < numHalosOnOtherProcs[proc]; i++) {
-        int globalID = recvBufs[proc][i];
+        int globalID = recvBufs[proc][i * 2];
        localIDBufs[proc][i] = global2local[globalID]; // Convert to localID
      }
    }
  }

+  // The owning process receives the global ID and the halo-local ID from each halo process.
+  // Convert the global ID to a local ID on this process and keep the halo process's local ID as well.
+  // TODO: build an ownerToHaloMap<int, int> from these pairs
+
+
+
+  // Send back the localIDs of the owned cells so that the halo processes can build their HaloToOwner map
  for (int proc = 0; proc < numProcsTot; proc++) {
    if (!localIDBufs[proc].empty()) {
      MPI_Request req;
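The TODO a few lines above proposes keeping an ownerToHaloMap built from the (globalID, haloLocalID) pairs that just arrived. One possible shape is sketched here under that assumption; OwnerToHaloMap and addRankToMap are hypothetical names, not repository code.

#include <cstddef>
#include <unordered_map>
#include <utility>
#include <vector>

// For each remote rank, record (ownerLocalID, haloLocalID-on-that-rank) pairs:
// an entry {lid, hid} under key proc means "my local element lid is mirrored as
// halo index hid on rank proc", which is what a later owner-to-halo push needs.
using OwnerToHaloMap = std::unordered_map<int, std::vector<std::pair<int, int>>>;

// recvBuf holds the interleaved [globalID, haloLocalID] pairs received from `proc`,
// and global2local is the owner's globalID -> localID map built earlier.
void addRankToMap(OwnerToHaloMap& ownerToHalo, int proc,
                  const std::vector<int>& recvBuf,
                  std::unordered_map<int, int>& global2local) {
  auto& entries = ownerToHalo[proc];
  entries.reserve(recvBuf.size() / 2);
  for (std::size_t i = 0; i + 1 < recvBuf.size(); i += 2) {
    int ownerLocalID = global2local[recvBuf[i]];  // convert global -> owner-local
    int haloLocalID  = recvBuf[i + 1];            // halo rank's own local index
    entries.emplace_back(ownerLocalID, haloLocalID);
  }
}

Such a map could sit alongside the scratch localIDBufs; the reply sent below can still be built from the .first values in the order the pairs arrived.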
@@ -666,6 +684,7 @@ void MPMesh::startCommunication(){
  }
  MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);

+  /*
  //Debugging
  printf("Rank %d Owners %d Halos %d Total %d \n", self, numOwnersTot, numHalosTot, numElements);
  for (int i=0; i<numProcsTot; i++){
@@ -703,7 +722,7 @@ void MPMesh::startCommunication(){
  printf("Owner LID in rank 0 %d \n", haloLocalIDs[1][i]);
  }
  MPI_Barrier(comm);
-
+  */
}

/*
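The last two hunks retire the debugging block by wrapping it in /* ... */. A common alternative, sketched below and not part of this commit, is a compile-time guard so the diagnostics can be re-enabled with a build flag instead of an edit; MPMESH_DEBUG_COMM and MPMESH_DBG are made-up names.

#include <cstdio>

// Sketch only: compile with -DMPMESH_DEBUG_COMM to enable the prints;
// otherwise every MPMESH_DBG(...) call expands to a no-op.
#ifdef MPMESH_DEBUG_COMM
#define MPMESH_DBG(...) std::printf(__VA_ARGS__)
#else
#define MPMESH_DBG(...) ((void)0)
#endif

Inside startCommunication() the first debug line would then read MPMESH_DBG("Rank %d Owners %d Halos %d Total %d \n", self, numOwnersTot, numHalosTot, numElements); and never needs to be commented out or back in.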