I have this:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include "mpi/mpi.h"
struct fields{
int hostNumber;
int *numberArray;
};
struct minMaxValue{
int value;
int index;
};
struct fields *start(int);
struct fields *gatherData(int);
int main(int argc, char** argv) {
int rank, size, *tmpArray, *numPerProcess, *rcvBuff, *recvDBuffer,
*recvPfSumsbuf, *recvPfBuffer, sum, cntMin, cntMax, minVal,
maxVal, minIndex = 0, maxIndex = 0, minMaxArray[2], *d, *displs, remNumPerProcess;
float avg = 0.0, sumAvg = 0.0,
spreading = 0.0, rcvTotSpreading,totAvg = 0.0;
struct fields *myFields;
bool flagCase;
setbuf(stdout, NULL);
/* MPI Initialization */
MPI_Init(&argc, &argv);
MPI_Status status;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
while(1){
/* Initialize required buffer */
myFields = (struct fields *)malloc(sizeof(struct fields));
if (myFields == NULL){
printf("Cannot allocate memory for myFields buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
numPerProcess = (int *)malloc(sizeof(int) * size);
if (numPerProcess == NULL){
printf("Cannot allocate memory for numPerProcess buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
displs = (int *)malloc(sizeof(int) * size);
if (displs == NULL){
printf("Cannot allocate memory for displs buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
if (rank == 0){
//printf("I am parent-process with rank = %d, size = %d
", rank, size);
/* Gather User's Data for manipulation */
myFields = start(rank);
//for (int i = 0 ; i < myFields->hostNumber ; i++) printf("%d\n",((myFields->numberArray)[i]));
/* Check whether the user entered fewer elements than the number of processes */
if ((myFields->hostNumber) < size){
printf("Error!!! Number of Processes is more than number of Elements.
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
/* Flag to distinguish between the two execution cases:
* Case 1: n = kp (the element count divides evenly among the processes)
* Case 2: any n, p
*/
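/* Illustrative numbers only (added for clarity, not taken from a specific run):
* n = 8,  p = 4 ->  8 % 4 == 0, so flagCase = true
* n = 13, p = 4 -> 13 % 4 == 1, so flagCase = false and the counts become [4, 3, 3, 3]
*/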
if (((myFields->hostNumber) % size) == 0){
flagCase = true;
}
else flagCase = false;
/* Calculate the remainder of the division hostNumber (number of elements in the array) / size (number of processes) */
remNumPerProcess = (myFields->hostNumber % size);
/* Initialize every entry of the numPerProcess array with the quotient of
* hostNumber (number of elements in the array) / size (number of processes)
*/
for (int i = 0; i < size; i++) numPerProcess[i] = (myFields->hostNumber / size);
/* In the second case (any n, p) distribute the remaining elements evenly among the first processes */
if (!flagCase){
for (int i = 0; i < remNumPerProcess; i ++) numPerProcess[i]++;
}
/* Initialize tmpArray and copy the user's data into it */
tmpArray = (int *)malloc(sizeof(int) * (myFields->hostNumber));
if (tmpArray == NULL){
printf("Cannot allocate memory for tmpArray buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
for (int i = 0; i < myFields->hostNumber; i++) tmpArray[i] = ((myFields->numberArray)[i]);
//for (int i= 0; i < myFields->hostNumber; i ++) printf("tmpArray[%d] = %d\n", i, tmpArray[i]);
//for (int i = 0; i < size; i ++) printf("numPerProcess[%d] = %d\n", i, numPerProcess[i]);
}
/* Broadcast flagCase to all processes */
MPI_Bcast(&flagCase, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD);
/* Broadcast numPerProcess Buffer to all processes */
MPI_Bcast(numPerProcess, size, MPI_INT, 0, MPI_COMM_WORLD);
/* Initialize the displs buffer with the displacement (starting offset) of every
* process's block inside the original array. The offset of process i is the sum
* of the counts of all lower-ranked processes, which covers both cases.
*/
displs[0] = 0;
for (int i = 1; i < size; i ++){
displs[i] = displs[i - 1] + numPerProcess[i - 1];
}
//for (int i = 0; i < size; i ++) printf("displs[%d] = %d\n", i, displs[i]);
/* Initialize the receive buffer for the elements MPI_Scatterv() delivers to this process */
rcvBuff = (int *)malloc(sizeof(int) * numPerProcess[rank]);
if (rcvBuff == NULL){
printf("Cannot allocate memory for rcvBuff buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
//MPI_Scatter(tmpArray, numPerProcess, MPI_INT, rcvBuff, numPerProcess, MPI_INT, 0, MPI_COMM_WORLD);
/* Perform MPI_Scatterv() to hand each process its elements according to the numPerProcess and displs buffers.
* For example, when I execute the program with 4 processes and 13 elements the buffers should be
* numPerProcess = [4, 3, 3, 3] and displs = [0, 4, 7, 10].
* When I run it with 4 processes and 8 elements they should be
* numPerProcess = [2, 2, 2, 2] and displs = [0, 2, 4, 6].
*/
MPI_Scatterv(tmpArray, numPerProcess, displs, MPI_INT, rcvBuff, numPerProcess[rank], MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) free(tmpArray); /* only rank 0 allocated tmpArray */
/* Initialize the d buffer used to calculate the D vector */
d = (int *)malloc(sizeof(int) * size * numPerProcess[rank]);
if (d == NULL){
printf("Cannot allocate memory for d buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
/*for (int i = 0; i < numPerProcess[rank]; i++){
printf("process = %d, numPerProcess = %d, tmpArray[%d] = %d
", rank, numPerProcess[rank], i, rcvBuff[i]);
}*/
sum = 0;
cntMin = cntMax = 0;
sumAvg = 0.0;
/* Every process calculates the sum and the average of its own data */
for (int i = 0; i < numPerProcess[rank]; i++){
sum += rcvBuff[i];
}
avg = ((float)sum / numPerProcess[rank]);
/* Every process counts how many of its elements lie below and above avg */
for (int i = 0; i < numPerProcess[rank]; i++){
if (((float)(rcvBuff[i])) < avg) cntMin++;
else if (((float)(rcvBuff[i])) > avg) cntMax++;
}
printf("process = %d, sum = %d, cntMax = %d, cntMin = %d, avg = %f
", rank, sum, cntMax, cntMin, avg);
/* MPI_Reduce sums the per-process averages onto process 0,
* which then computes the overall average of the vector
*/
MPI_Reduce(&avg, &totAvg, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) printf("The sequence's Avg is %f\n", ((float)(totAvg) / size));
minVal = maxVal = rcvBuff[0];
/* Every process finds the min and max among the elements that belong to it */
for (int i = 0; i < numPerProcess[rank]; i++){
if (minVal > rcvBuff[i]){
minVal = rcvBuff[i];
minIndex = i;
}
if (maxVal < rcvBuff[i]){
maxVal = rcvBuff[i];
maxIndex = i;
}
}
//printf("process = %d, minindex = %d, maxindex=%d
",rank,minIndex,maxIndex);
struct minMaxValue *structIn, *structOut;
/* Initialize struct(s) structIn, structOut */
structIn = (struct minMaxValue *)malloc(sizeof(struct minMaxValue));
if (structIn == NULL){
printf("Cannot allocate memory for structIn buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
structOut = (struct minMaxValue *)malloc(sizeof(struct minMaxValue));
if (structOut == NULL){
printf("Cannot allocate memory for structOut buffer!
");
MPI_Abort(MPI_COMM_WORLD, 0);
}
/* Assign values to structIn. structIn keeps the maxVal of this process's elements and
* its position relative to the full vector. Here I perform MPI_Reduce with
* MPI_MAXLOC although it is not required; a plain MPI_Gather would be enough.
*/
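/* Note: MPI_MAXLOC / MPI_MINLOC on MPI_2INT operate on (value, index) pairs of two
* consecutive ints, which matches the layout of struct minMaxValue defined above. */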
structIn->value = maxVal;
structIn->index = (displs[rank] + maxIndex); /* chunk offset plus local index */
MPI_Reduce(structIn, structOut, 1, MPI_2INT, MPI_MAXLOC, 0, MPI_COMM_WORLD);
if (rank == 0) printf("maxStructOut.val = %d, maxStructOut.pos = %d\n", structOut->value, structOut->index);
MPI_Barrier(MPI_COMM_WORLD);
/* On rank 0, save the max value in the minMaxArray buffer */
if (rank == 0){
minMaxArray[0] = structOut->value;
}
/* Assign values to structIn. structIn now keeps the minVal of this process's elements and
* its position relative to the full vector. Here I perform MPI_Reduce with
* MPI_MINLOC although it is not required; a plain MPI_Gather would be enough.
*/
structIn->value = minVal;
structIn->index = (displs[rank] + minIndex); /* chunk offset plus local index */
MPI_Reduce(structIn, structOut, 1, MPI_2INT, MPI_MINLOC, 0, MPI_COMM_WORLD);
if (rank == 0) printf("minStructOut.val = %d, minStructOut.pos = %d\n", structOut->value, structOut->index);
MPI_Barrier(MPI_COMM_WORLD);
/* On rank 0, save the min value in the minMaxArray buffer */
if (rank == 0){
minMaxArray[1] = structOut->value;
}
free(structIn);
free(structOut);
spreading = 0.0;
/* Calculate the spreading (sum of squared deviations) over the elements every process has */
for (int i = 0; i < numPerProcess[rank]; i ++){
spreading += ((rcvBuff[i] - avg) * (rcvBuff[i] - avg));
}
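/* Note: spreading accumulates squared deviations from this process's local average,
* not the global one, so the reduced value below is the sum of these per-chunk sums. */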
/* Send spreading to process 0, summing the spreading values of all processes
* along the way.
*/
MPI_Reduce(&spreading, &rcvTotSpreading, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) printf("Spreading = %f\n", rcvTotSpreading);
MPI_Barrier(MPI_COMM_WORLD);
/* Bcast the minMaxArray to process 0. MinMaxArray hold the min and