Commit 501a5ed

committed
Removed pair<int, int>; instead, two vectors store ownerOwnerLocalIDs and ownerHaloLocalIDs, so the send vector no longer needs to be created at each time step. Optimize#1
1 parent 9f8bbdd commit 501a5ed
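
For context, a minimal standalone sketch of the layout change described in the commit message: one array of (ownerLocalID, haloLocalID) pairs per destination rank becomes two parallel arrays. Only the member names ownerToHalos, ownerOwnerLocalIDs, and ownerHaloLocalIDs come from the diff below; HaloExchangePlan and the literal values are illustrative.

#include <utility>
#include <vector>

// Before: one vector of (ownerLocalID, haloLocalID) pairs per destination rank.
using OwnerToHalos = std::vector<std::vector<std::pair<int,int>>>;

// After: two parallel vectors per destination rank. Index i in both refers to the
// same halo entity, so the integer ID message for rank p is ownerHaloLocalIDs[p]
// itself and no per-time-step send ID vector has to be assembled.
struct HaloExchangePlan {                             // illustrative, not a pmpo type
  std::vector<std::vector<int>> ownerOwnerLocalIDs;   // local ID on the owning rank
  std::vector<std::vector<int>> ownerHaloLocalIDs;    // local ID on the halo rank
};

int main() {
  HaloExchangePlan plan;
  plan.ownerOwnerLocalIDs = {{5, 9}};  // one destination rank, two halo entities
  plan.ownerHaloLocalIDs  = {{0, 3}};
  // The parallel arrays must stay the same length per destination rank.
  return plan.ownerOwnerLocalIDs[0].size() == plan.ownerHaloLocalIDs[0].size() ? 0 : 1;
}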

File tree

2 files changed, +22 -44 lines changed:

  src/pmpo_MPMesh.hpp
  src/pmpo_MPMesh_assembly.hpp


src/pmpo_MPMesh.hpp

Lines changed: 8 additions & 1 deletion
@@ -34,12 +34,19 @@ class MPMesh{
 
     std::map<MeshFieldIndex, std::function<void()>> reconstructSlice = std::map<MeshFieldIndex, std::function<void()>>();
 
+    //For MPI Communication
     int numOwnersTot, numHalosTot;
     std::vector<int> numOwnersOnOtherProcs;
     std::vector<int> numHalosOnOtherProcs;
     std::vector<int>haloOwnerProcs;
+
     std::vector<std::vector<int>> haloOwnerLocalIDs;
-    std::vector<std::vector<std::pair<int,int>>> ownerToHalos;
+
+    //Halos send to owners globalIDs which are converted to local IDs of owners
+    //Also keep haloLocalID in owner
+    std::vector<std::vector<int>> ownerOwnerLocalIDs;
+    std::vector<std::vector<int>> ownerHaloLocalIDs;
+
     void startCommunication();
     void communicateFields(const std::vector<std::vector<double>>& fieldData, const int numEntities, const int numEntries, int mode,
                            std::vector<std::vector<int>>& recvIDVec, std::vector<std::vector<double>>& recvDataVec);

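The comments added to the header ("Halos send to owners globalIDs which are converted to local IDs of owners", "Also keep haloLocalID in owner") describe how the two vectors are filled on the owning rank. Below is a standalone sketch of that conversion, outside MPI; it assumes num_ints_per_copy == 2 with each copy packed as [globalID, haloLocalID], which is not confirmed by this diff, and the buffer contents are made up.

#include <cstdio>
#include <map>
#include <vector>

int main() {
  const int num_ints_per_copy = 2;                        // assumed packing: [globalID, haloLocalID]
  std::vector<int> recvBuf = {101, 0, 104, 3};            // two entities requested by one halo rank
  std::map<int,int> global2local = {{101, 7}, {104, 12}}; // owner's globalID -> localID

  const int n = recvBuf.size() / num_ints_per_copy;
  std::vector<int> ownerOwnerLocalIDs(n), ownerHaloLocalIDs(n);
  for (int i = 0; i < n; i++) {
    int globalID = recvBuf[i*num_ints_per_copy];
    ownerOwnerLocalIDs[i] = global2local[globalID];             // where the owner reads its value
    ownerHaloLocalIDs[i]  = recvBuf[i*num_ints_per_copy + 1];   // where the halo rank stores it
  }
  for (int i = 0; i < n; i++)
    printf("owner local %d -> halo local %d\n", ownerOwnerLocalIDs[i], ownerHaloLocalIDs[i]);
  return 0;
}
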
src/pmpo_MPMesh_assembly.hpp

Lines changed: 14 additions & 43 deletions
@@ -581,9 +581,6 @@ void MPMesh::startCommunication(){
   haloOwnerProcs.reserve(numHalosTot);
   haloOwnerLocalIDs.resize(numProcsTot);
 
-  //Pair has haloLocalId and ownerLocalId
-  ownerToHalos.resize(numProcsTot);
-
   //Copy owning processes and globalIds to CPU
   auto entOwners_host = Kokkos::create_mirror_view_and_copy(Kokkos::DefaultHostExecutionSpace::memory_space(),
                                                             entOwners);
@@ -648,26 +645,17 @@ void MPMesh::startCommunication(){
   //Now the owner process needs to look at these globalIDs convert them to localIds and send it back
   //recvBufs[p] contains global IDs of elements that halo rank p needs
   //numHalosOnOtherProcs[p] tells how many to expect from proc p
-  std::vector<std::vector<int>> localIDBufs(numProcsTot);
+  ownerOwnerLocalIDs.resize(numProcsTot);
+  ownerHaloLocalIDs.resize(numProcsTot);
+
   for (int proc = 0; proc < numProcsTot; proc++) {
     if (numHalosOnOtherProcs[proc] > 0) {
-      localIDBufs[proc].resize(numHalosOnOtherProcs[proc]);
+      ownerOwnerLocalIDs[proc].resize(numHalosOnOtherProcs[proc]);
+      ownerHaloLocalIDs[proc].resize(numHalosOnOtherProcs[proc]);
       for (int i = 0; i < numHalosOnOtherProcs[proc]; i++) {
         int globalID = recvBufs[proc][i*num_ints_per_copy];
-        localIDBufs[proc][i] = global2local[globalID];
-      }
-    }
-  }
-
-  //Owning process gets the global and local Id from halo process
-  //Conver Global to local in this process and also local ID in halo process
-  //So TODO make ownerTOHaloMap <int, int>
-  for (int proc = 0; proc < numProcsTot; ++proc) {
-    if (!recvBufs[proc].empty()) {
-      for (int i = 0; i < numHalosOnOtherProcs[proc]; ++i) {
-        int ownerLocalID = localIDBufs[proc][i];
-        int haloLocalID = recvBufs[proc][i*num_ints_per_copy+1];
-        ownerToHalos[proc].push_back(std::make_pair(ownerLocalID, haloLocalID));
+        ownerOwnerLocalIDs[proc][i] = global2local[globalID];
+        ownerHaloLocalIDs[proc][i] = recvBufs[proc][i*num_ints_per_copy+1];
       }
     }
   }
@@ -684,9 +672,9 @@ void MPMesh::startCommunication(){
 
   //Sends back localID of the owned cells so that HaloToOwner can be done for halo processes
   for (int proc = 0; proc < numProcsTot; proc++) {
-    if (!localIDBufs[proc].empty()) {
+    if (numHalosOnOtherProcs[proc]>0) {
       MPI_Request req;
-      MPI_Isend(localIDBufs[proc].data(), localIDBufs[proc].size(), MPI_INT, proc, 1, comm, &req);
+      MPI_Isend(ownerOwnerLocalIDs[proc].data(), ownerOwnerLocalIDs[proc].size(), MPI_INT, proc, 1, comm, &req);
       requests.push_back(req);
     }
   }
@@ -721,23 +709,14 @@ void MPMesh::startCommunication(){
   MPI_Barrier(comm);
   //Check if now Rank 0 has the lids corresponing to rank 1
   if(self==1){
-    for (int i=0; i<localIDBufs[0].size(); i++)
-      printf("LIDs in owned rank 1 %d \n", localIDBufs[0][i]);
-    //printf("Rank %d local 0 13 Global %d %d\n", self, ent2global_host(0), ent2global_host(13));
+    for (int i=0; i<ownerOwnerLocalIDs[0].size(); i++)
+      printf("LIDs in owned rank 1 %d \n", ownerOwnerLocalIDs[0][i]);
   }
   MPI_Barrier(comm);
   //Checking if they have received them back
   if(self==0){
     for (int i=0; i<haloOwnerLocalIDs[1].size(); i++)
       printf("Owner LID in rank 0 %d \n", haloOwnerLocalIDs[1][i]);
-    //printf("Rank %d local 641 644 Global %d %d\n", self, ent2global_host(641), ent2global_host(644));
-  }
-  MPI_Barrier(comm);
-  //OwnerToHalos
-  if(self==1){
-    for (const auto &p : ownerToHalos[0]) {
-      std::cout << "(" << p.first << ", " << p.second << ")\n";
-    }
   }
 }
 
@@ -994,10 +973,6 @@ void MPMesh::communicateFields(const std::vector<std::vector<double>>& fieldData
 
   assert(numEntities == numOwnersTot + numHalosTot);
 
-  std::vector<MPI_Request> recvRequests;
-  std::vector<MPI_Request> sendRequests;
-
-  std::vector<std::vector<int>> sendIDVec(numProcsTot);
   std::vector<std::vector<double>> sendDataVec(numProcsTot);
 
   recvIDVec.resize(numProcsTot);
@@ -1020,8 +995,6 @@ void MPMesh::communicateFields(const std::vector<std::vector<double>>& fieldData
 
     if(numToSend > 0){
       sendDataVec[i].reserve(numToSend*numEntries);
-      if (mode == 1) sendIDVec[i].reserve(numToSend);
-
     }
     if(numToRecv > 0){
       recvDataVec[i].resize(numToRecv*numEntries);
@@ -1040,9 +1013,8 @@ void MPMesh::communicateFields(const std::vector<std::vector<double>>& fieldData
 
   else if(mode == 1){
     // Owner sends to halos
-    for (int iProc=0; iProc<ownerToHalos.size(); iProc++) {
-      for (auto& [ownerID, haloID] : ownerToHalos[iProc]) {
-        sendIDVec[iProc].push_back(haloID);
+    for (int iProc=0; iProc<ownerOwnerLocalIDs.size(); iProc++) {
+      for (auto& ownerID : ownerOwnerLocalIDs[iProc]) {
         for (int iDouble = 0; iDouble < numEntries; iDouble++)
           sendDataVec[iProc].push_back(fieldData[ownerID][iDouble]);
       }
@@ -1063,7 +1035,6 @@ void MPMesh::communicateFields(const std::vector<std::vector<double>>& fieldData
       requests.push_back(req4);
     }
     if(mode == 0 && numOwnersOnOtherProcs[proc]) {
-
       assert(haloOwnerLocalIDs[proc].size() == (size_t)numOwnersOnOtherProcs[proc]);
       assert(sendDataVec[proc].size() == haloOwnerLocalIDs[proc].size() * (size_t)numEntries);
       MPI_Request req1, req2;
@@ -1082,7 +1053,7 @@ void MPMesh::communicateFields(const std::vector<std::vector<double>>& fieldData
     }
     if(mode == 1 && numHalosOnOtherProcs[proc]) {
       MPI_Request req1, req2;
-      MPI_Isend(sendIDVec[proc].data(), sendIDVec[proc].size(), MPI_INT, proc, 1, comm, &req1);
+      MPI_Isend(ownerHaloLocalIDs[proc].data(), ownerHaloLocalIDs[proc].size(), MPI_INT, proc, 1, comm, &req1);
       MPI_Isend(sendDataVec[proc].data(), sendDataVec[proc].size(), MPI_DOUBLE, proc, 2, comm, &req2);
       requests.push_back(req1);
       requests.push_back(req2);

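A standalone two-rank sketch of the mode == 1 ("owner sends to halos") exchange after this change: the owner packs only the doubles each time step and sends ownerHaloLocalIDs[proc] directly as the ID message, which is what makes the per-step sendIDVec unnecessary. Ranks, tags, sizes, and values are illustrative, not taken from pmpo_MPMesh; build with an MPI compiler and run with mpirun -n 2.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int self;
  MPI_Comm comm = MPI_COMM_WORLD;
  MPI_Comm_rank(comm, &self);

  const int numEntries = 1, numHalos = 2;
  if (self == 0) {
    // Owner side: these two vectors would be filled once in startCommunication().
    std::vector<int> ownerOwnerLocalIDs = {5, 9};  // where to read on the owner
    std::vector<int> ownerHaloLocalIDs  = {0, 3};  // where rank 1 will store the values
    std::vector<double> fieldData = {0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9};

    // Per time step: only the data buffer is packed; the ID buffer already exists.
    std::vector<double> sendData;
    for (int ownerID : ownerOwnerLocalIDs)
      for (int j = 0; j < numEntries; j++)
        sendData.push_back(fieldData[ownerID]);

    MPI_Request reqs[2];
    MPI_Isend(ownerHaloLocalIDs.data(), numHalos, MPI_INT, 1, 1, comm, &reqs[0]);
    MPI_Isend(sendData.data(), numHalos*numEntries, MPI_DOUBLE, 1, 2, comm, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
  } else if (self == 1) {
    // Halo side: receive the local IDs to write into and the matching values.
    std::vector<int> recvIDs(numHalos);
    std::vector<double> recvData(numHalos*numEntries);
    MPI_Request reqs[2];
    MPI_Irecv(recvIDs.data(), numHalos, MPI_INT, 0, 1, comm, &reqs[0]);
    MPI_Irecv(recvData.data(), numHalos*numEntries, MPI_DOUBLE, 0, 2, comm, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
    for (int i = 0; i < numHalos; i++)
      printf("halo local ID %d receives %.1f\n", recvIDs[i], recvData[i]);
  }
  MPI_Finalize();
  return 0;
}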