Initial commit

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"
const int MASTER_RANK = 0;
const int MASTER_SEND_DATA = 1;
const int MASTER_RECEIVE_DATA = 2;
void fillRandomData(double *m, int N)
{
for(int i = 0; i < N * N; ++i)
m[i] = ((double) rand() / (RAND_MAX));
}
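//Note: rand() is never seeded, so every run multiplies the same matrices;
//calling srand((unsigned) time(NULL)) once in main (with <ctime>) would vary the data between runs.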
int main(int argc, char *argv[])
{
int rank;
int size;
int N;
double *A;
double *B;
double *C;
double *ARows;
if(argc > 1) {
N = atoi(argv[1]);
}
else {
fprintf(stderr, "Usage: %s matrix_size\n", argv[0]);
exit(1);
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
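//Layout: rank 0 (the master) distributes row blocks of A and the full B
//to the size - 1 workers, then gathers the matching row blocks of C.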
int thCount = size - 1;
int rowsPerTh = N / thCount;
int extraRowCount = N % thCount;
int offset = 0;
int rows;
MPI_Status status;
//Master thread allocates data and sends it to worker threads
if (rank == MASTER_RANK) {
double start;
double end;
printf("Matrix size: %d\n", N);
printf("Thread count: %d\n", thCount);
printf("Rows per thread: %d\n", rowsPerTh);
printf("Extra rows: %d\n", extraRowCount);
A = new double[N*N];
B = new double[N*N];
C = new double[N*N];
fillRandomData(A, N);
fillRandomData(B, N);
start = MPI_Wtime();
//Send matrix A parts (of N/threadCount rows) and B matrix to worker threads
for (int th = 1; th <= thCount; ++th)
{
rows = (th <= extraRowCount) ? rowsPerTh + 1 : rowsPerTh;
MPI_Send(&rows, 1, MPI_INT, th, MASTER_SEND_DATA, MPI_COMM_WORLD);
MPI_Send(A + offset, rows*N, MPI_DOUBLE, th, MASTER_SEND_DATA, MPI_COMM_WORLD);
MPI_Send(B, N*N, MPI_DOUBLE, th, MASTER_SEND_DATA, MPI_COMM_WORLD);
offset = offset + rows * N;
}
offset = 0;
//Receive matrix C parts (of N/threadCount rows) from worker threads
for (int th = 1; th <= thCount; ++th)
{
MPI_Recv(&rows, 1, MPI_INT, th, MASTER_RECEIVE_DATA, MPI_COMM_WORLD, &status);
MPI_Recv(C + offset, rows*N, MPI_DOUBLE, th, MASTER_RECEIVE_DATA, MPI_COMM_WORLD, &status);
offset = offset + rows * N;
}
end = MPI_Wtime();
printf("Work time: %lf\n", end - start);
}
else {
//Receive row count
MPI_Recv(&rows, 1, MPI_INT, MASTER_RANK, MASTER_SEND_DATA, MPI_COMM_WORLD, &status);
//Allocate A matrix part and B matrix
ARows = new double[N * rows];
B = new double[N * N];
C = new double[N * rows];
//Receive matrices
MPI_Recv(ARows, rows*N, MPI_DOUBLE, MASTER_RANK, MASTER_SEND_DATA, MPI_COMM_WORLD, &status);
MPI_Recv(B, N*N, MPI_DOUBLE, MASTER_RANK, MASTER_SEND_DATA, MPI_COMM_WORLD, &status);
//Standard row-block by matrix multiply (C = ARows * B)
for (int k = 0; k < N; ++k) {
for (int i = 0; i < rows; ++i) {
C[i * N + k] = 0.0;
for (int j = 0; j < N; ++j)
C[i * N + k] = C[i * N + k] + ARows[i * N + j] * B[j * N + k];
}
}
//Send rows of C matrix to Master thread
MPI_Send(&rows, 1, MPI_INT, MASTER_RANK, MASTER_RECEIVE_DATA, MPI_COMM_WORLD);
MPI_Send(C, rows * N, MPI_DOUBLE, MASTER_RANK, MASTER_RECEIVE_DATA, MPI_COMM_WORLD);
}
MPI_Finalize();
}
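//A minimal spot check one could append on the master after the gather
//(a sketch, not part of the original program): recompute a few entries
//of C with the naive triple loop and compare against the gathered result.
bool spotCheck(const double *A, const double *B, const double *C, int N)
{
for (int i = 0; i < 3 && i < N; ++i)
for (int k = 0; k < 3 && k < N; ++k) {
double s = 0.0;
for (int j = 0; j < N; ++j)
s += A[i * N + j] * B[j * N + k];
if (fabs(s - C[i * N + k]) > 1e-9 * N)
return false;
}
return true;
}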
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
const int MASTER_RANK = 0;
struct GridInfo
{
int procNum = 0;
int procRank = 0;
int gridSize;
int gridCoordinates[2];
MPI_Comm gridComm;
MPI_Comm colComm;
MPI_Comm rowComm;
};
void initCommunicators(GridInfo &mpiInfo) {
int dimSize[2];
int periodic[2];
int subdims[2];
dimSize[0] = mpiInfo.gridSize;
dimSize[1] = mpiInfo.gridSize;
periodic[0] = 0;
periodic[1] = 0;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimSize, periodic, 1, &mpiInfo.gridComm);
MPI_Cart_coords(mpiInfo.gridComm, mpiInfo.procRank, 2, mpiInfo.gridCoordinates);
subdims[0] = 0;
subdims[1] = 1;
MPI_Cart_sub(mpiInfo.gridComm, subdims, &mpiInfo.rowComm);
subdims[0] = 1;
subdims[1] = 0;
MPI_Cart_sub(mpiInfo.gridComm, subdims, &mpiInfo.colComm);
}
void fillRandomData(double *m, int N)
{
for(int i = 0; i < N * N; ++i)
m[i] = ((double) rand() / (RAND_MAX));
}
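//The Fox-algorithm helpers called from main() are not defined in this
//commit; the declarations below are assumptions matching the usual
//textbook decomposition (scatter blocks, iterate broadcast + multiply +
//cyclic shift, gather, verify, free).
void DataDistribution(double *A, double *B, double *tempA, double *BBlock, int N, int blockSize);
void ParallelResultCalculation(double *ABlock, double *tempA, double *BBlock, double *CBlock, int blockSize);
void ResultCollection(double *C, double *CBlock, int N, int blockSize);
void TestResult(double *A, double *B, double *C, int N);
void ProcessTermination(double *A, double *B, double *C, double *ABlock, double *BBlock, double *CBlock, double *tempA);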
int main (int argc, char *argv[])
{
int N = 0;
if(argc > 1)
{
N = atoi(argv[1]);
}
else
{
fprintf(stderr, "Usage: %s matrix_size\n", argv[0]);
exit(1);
}
double* A;
double* B;
double* C;
int blockSize;
double *ABlock;
double *BBlock;
double *CBlock;
double *tempA;
MPI_Init(&argc, &argv);
GridInfo mpiInfo;
MPI_Comm_size(MPI_COMM_WORLD, &mpiInfo.procNum);
MPI_Comm_rank(MPI_COMM_WORLD, &mpiInfo.procRank);
mpiInfo.gridSize = (int) sqrt((double) mpiInfo.procNum);
if (mpiInfo.procNum != mpiInfo.gridSize * mpiInfo.gridSize) {
if (mpiInfo.procRank == MASTER_RANK) {
printf("Process count must be a perfect square\n");
}
MPI_Finalize();
exit(1);
}
initCommunicators(mpiInfo);
MPI_Bcast(&N, 1, MPI_INT, MASTER_RANK, MPI_COMM_WORLD);
blockSize = N / mpiInfo.gridSize; //assumes N is divisible by gridSize
if(mpiInfo.procRank == MASTER_RANK)
{
A = new double [N * N];
B = new double [N * N];
C = new double [N * N];
fillRandomData(A, N);
fillRandomData(B, N);
}
ABlock = new double [blockSize*blockSize];
BBlock = new double [blockSize*blockSize];
CBlock = new double [blockSize*blockSize];
tempA = new double [blockSize*blockSize];
for (int i=0; i< blockSize * blockSize; i++)
CBlock[i] = 0;
//Allocation and random fill above already prepare the data, so the textbook
//initData step is omitted. The remaining Fox-algorithm helpers are called
//with this file's variables; they are not defined in this commit (see the
//assumed declarations above main).
DataDistribution(A, B, tempA, BBlock, N, blockSize);
ParallelResultCalculation(ABlock, tempA, BBlock, CBlock, blockSize);
ResultCollection(C, CBlock, N, blockSize);
TestResult(A, B, C, N);
ProcessTermination(A, B, C, ABlock, BBlock, CBlock, tempA);
MPI_Finalize();
}
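//Sketch of the per-iteration block multiply that ParallelResultCalculation
//would call on each grid process (assumed helper; accumulates into CBlock,
//which main() zero-initializes above):
void BlockMultiply(const double *ABlock, const double *BBlock, double *CBlock, int blockSize)
{
for (int i = 0; i < blockSize; ++i)
for (int j = 0; j < blockSize; ++j) {
double s = 0.0;
for (int k = 0; k < blockSize; ++k)
s += ABlock[i * blockSize + k] * BBlock[k * blockSize + j];
CBlock[i * blockSize + j] += s;
}
}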
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"
const int MASTER_RANK = 0;
const int MASTER_SEND_DATA = 1;
const int MASTER_RECEIVE_DATA = 2;
const int EXCHANGE_GHOST_CELLS = 3;
double l(int option)
{
switch(option)
{
case 1: return 1.0;
case 2: return M_PI;
case 3: return 1.0;
case 4: return 1.0;
case 5: return M_PI;
default: return .0;
}
}
double u_x_0(double x, int option)
{
switch(option)
{
case 1: return 0.2 * x * (1.0 - x) * sin(M_PI * x);
case 2: return (M_PI - x) * x;
case 3: return sin(M_PI * x);
case 4: return x * sin(M_PI * x);
case 5: return x * x * (M_PI - x * x);
default: return .0;
}
}
double u_t_x_0(double x, int option)
{
return .0;
}
double u_0_t(double t, int option)
{
return .0;
}
double u_l_t(double t, int option)
{
switch(option)
{
case 1: return .0;
case 2: return .0;
case 3: return .0;
case 4: return .0;
case 5: return 1.0;
default: return .0;
}
}
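//The worker loop in main() advances the 1-D wave equation u_tt = u_xx with
//the standard explicit scheme
//  u[n+1][j] = 2*u[n][j] - u[n-1][j] + (dt*dt/(dx*dx)) * (u[n][j+1] - 2*u[n][j] + u[n][j-1]),
//which is stable when dt/dx <= 1 (the CFL condition).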
int main(int argc, char *argv[])
{
int rank;
int size;
double dt, dx;
unsigned int N, k;
double T;
int option;
double *u;
if(argc > 4) {
option = atoi(argv[1]);
T = atof(argv[2]);
dx = atof(argv[3]);
dt = atof(argv[4]);
}
else {
fprintf(stderr, "Usage: %s option T dx dt\n", argv[0]);
exit(1);
}
k = (int) floor(T / dt);
N = (int) floor(l(option) / dx);
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int thCount = size - 1;
unsigned int colsPerTh = N / thCount;
unsigned int extraColCount = N % thCount;
int offset = 0;
int cols;
//Master thread allocates data and sends it to worker threads
if (rank == MASTER_RANK) {
double start;
double end;
printf("T: %lf\n", T);
printf("dt: %.20lf\n", dt);
printf("dx: %lf\n", dx);
printf("X part num: %u\n", N);
printf("T part num: %u\n", k);
printf("Thread count: %d\n", thCount);
printf("X parts per thread: %u\n", colsPerTh);
printf("Extra X parts: %u\n", extraColCount);
u = new double[k * N];
start = MPI_Wtime();
for (int th = 1; th <= thCount; ++th)
{
cols = (th <= extraColCount) ? colsPerTh + 1 : colsPerTh;
MPI_Send(&offset, 1, MPI_INT, th, MASTER_SEND_DATA, MPI_COMM_WORLD);
offset = offset + cols;
}
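//Note: each worker's k x cols block arrives contiguously, so u ends up
//ordered block-by-block (one column block per worker), not row-major.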
offset = 0;
//Receive matrix U parts (of N/threadCount cols) from worker threads
for (int th = 1; th <= thCount; ++th)
{
MPI_Recv(&cols, 1, MPI_INT, th, MASTER_RECEIVE_DATA, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(u + offset, k * cols, MPI_DOUBLE, th, MASTER_RECEIVE_DATA, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
offset = offset + k * cols;
}
end = MPI_Wtime();
printf("Work time: %lf\n", end - start);
}
else {
MPI_Recv(&offset, 1, MPI_INT, MASTER_RANK, MASTER_SEND_DATA, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
cols = (rank <= extraColCount) ? colsPerTh + 1 : colsPerTh;
// ghost values from the neighbours are kept in two scalars rather than in extra columns
u = new double[k * cols];
//ghost values received from the neighbouring workers; valid from the
//second time step on, after the first exchange below
double u_ghost_prev = .0;
double u_ghost_next = .0;
for(unsigned int n = 0 ; n < k; ++n)
{
//fill start value
if(n == 0)
{
for(unsigned int j = 0 ; j < cols; ++j)
u[j] = u_x_0((j + offset) * dx, option);
}
else if(n == 1)
{
for(unsigned int j = 0 ; j < cols; ++j)
u[cols + j] = u[j] + dt * u_t_x_0((j + offset) * dx, option); //row stride is cols, not k
}
else
{
for(unsigned int j = 0; j < cols; ++j)
{
double u_prev, u_next;
if(j > 0)
u_prev = u[(n - 1) * cols + j - 1];
else
u_prev = u_ghost_prev;
if(j < cols - 1) //the last local column takes the ghost value, not j < N - 1
u_next = u[(n - 1) * cols + j + 1];
else
u_next = u_ghost_next;
if((j + offset) > 0 && (j + offset) < N - 1)
{
u[n * cols + j] = 2 * u[(n - 1) * cols + j] - u[(n - 2) * cols + j] +
((dt * dt) / (dx * dx)) * (u_next - 2 * u[(n - 1) * cols + j] + u_prev);
}
else if((j + offset) == 0)
{
u[n * cols + j] = u_0_t(dt * n, option);
}
else if((j + offset) == N - 1)
{
u[n * cols + j] = u_l_t(dt * n, option);
}
}
}
//Exchange the just-computed boundary values with the neighbouring workers
//after every time step (including n = 0 and n = 1) so the ghost values are
//valid when the next step reads them
if (rank < size - 1) {
MPI_Send(&u[n * cols + cols - 1], 1, MPI_DOUBLE, rank + 1, EXCHANGE_GHOST_CELLS, MPI_COMM_WORLD);
}
if (rank > 1) {
MPI_Send(&u[n * cols], 1, MPI_DOUBLE, rank - 1, EXCHANGE_GHOST_CELLS, MPI_COMM_WORLD);
}
if (rank < size - 1) {
MPI_Recv(&u_ghost_next, 1, MPI_DOUBLE, rank + 1, EXCHANGE_GHOST_CELLS, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
if (rank > 1) {
MPI_Recv(&u_ghost_prev, 1, MPI_DOUBLE, rank - 1, EXCHANGE_GHOST_CELLS, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
//Send cols of U matrix to Master thread
MPI_Send(&cols, 1, MPI_INT, MASTER_RANK, MASTER_RECEIVE_DATA, MPI_COMM_WORLD);
MPI_Send(u, k * cols, MPI_DOUBLE, MASTER_RANK, MASTER_RECEIVE_DATA, MPI_COMM_WORLD);
}
MPI_Finalize();
}
#!/usr/bin/python
import sys
import os
import subprocess
import re
import time
def _parse_arguments(argv):
    from argparse import ArgumentParser
    parser = ArgumentParser(prog="run.py",
                            description='\nCompiles and runs MPI tasks')
    parser.add_argument("-t", "--task", dest="task", choices=['matrix', 'thermal', 'oscillation'], default='matrix',
                        help='Task to run', required=True)
    parser.add_argument("-w", "--wait", action="store_true", dest="wait",
                        help='Wait for the task to finish in the queue')
    parser.add_argument("-np", dest="threads", type=int,
                        help='Specify the number of threads', required=True)
    parser.add_argument("-o", "--option", dest="option", type=int,
                        help='Specify the variant', required=True)
    group = parser.add_argument_group('Matrix options')
    group.add_argument("-n", dest="matrix_size", type=int, help='Specify the matrix size')
    group = parser.add_argument_group('Thermal equation options')
    group.add_argument("-N", dest="N", type=int, help='Specify N (x part num)')
    group.add_argument("-t_therm", dest="T", type=float, help='Specify T (time length)')
    group = parser.add_argument_group('Oscillation equation options')
    group.add_argument("-t_osc", dest="T", type=float, help='Specify T (time length)')
    group.add_argument("-dt", dest="dt", type=float, help='Specify dt')
    group.add_argument("-dx", dest="dx", type=float, help='Specify dx')
    return parser.parse_known_args(argv)
if __name__ == "__main__":
    (args, unknown) = _parse_arguments(sys.argv[1:])
    if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help'):
        sys.exit(0)
    filename = args.task
    out_dir = "compiled"
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    run_file = os.path.join(out_dir, filename)
    print('==> Compiling "%s" program!' % (filename))
    os.system('mpicxx %s.cc -o %s' % (filename, run_file))
    run_args = ""
    if args.task == 'matrix':
        run_args = '%d' % (args.matrix_size)
    elif args.task == 'thermal':
        run_args = '%d %d %lf' % (args.option, args.N, args.T)
    elif args.task == 'oscillation':
        run_args = '%d %lf %lf %lf' % (args.option, args.T, args.dx, args.dt)
    if not args.wait:
        os.system('mpirun -np %d %s %s' % (args.threads + 1, run_file, run_args))
    else:
        proc = subprocess.Popen('mpirun -np %d %s %s' % (args.threads + 1, run_file, run_args), stdout=subprocess.PIPE, shell=True)
        (out, err) = proc.communicate()
        out = out.decode()
        print(out)
        find_proc_name = re.compile(r'%s\.\d+' % (filename))
        proc_names = find_proc_name.findall(out)
        if len(proc_names) >= 1:
            print(proc_names[0])
            proc_name = proc_names[0]
            wait_proc_re = re.compile(proc_name)
            while True:
                proc = subprocess.Popen('mqinfo', stdout=subprocess.PIPE, shell=True)
                (out, err) = proc.communicate()
                if wait_proc_re.search(out.decode()) is None:
                    with open(os.path.join(proc_name, 'output'), 'r') as f:
                        print(f.read())
                    break
                time.sleep(1)
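# Example invocations (illustrative; the mqinfo queue polling above is
# site-specific, and -o is required even where the program ignores it):
#   ./run.py -t matrix -np 4 -o 1 -n 512
#   ./run.py -t oscillation -np 4 -o 2 -t_osc 1.0 -dx 0.001 -dt 0.0005 -w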
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"
const int MASTER_RANK = 0;
const int MASTER_SEND_DATA = 1;
const int MASTER_RECEIVE_DATA = 2;
const int EXCHANGE_GHOST_CELLS = 3;
double l(int option)
{
switch(option)
{
case 1: return 1.0;
case 2: return 4.0;
case 3: return 5.0;
case 4: return 1.0;
case 5: return 2.0;
default: return .0;
}
}
double u_x_0(double x, int option)
{
switch(option)
{
case 1: return sin(M_PI * x);
case 2: return 2 * x + 1.0;
case 3: return x * x / 2.0;
case 4: return 4.0 * x * (1.0 - x);
case 5: return x * x / 4.0;
default: return .0;
}
}
double u_0_t(double t, int option)
{
switch(option)
{
case 1: return .0;
case 2: return 1.0;
case 3: return t;
case 4: return .0;
case 5: return .0;
default: return .0;
}
}
double u_l_t(double t, int option)
{
switch(option)
{
case 1: return .0;
case 2: return 9.0;
case 3: return t + 20.0;
case 4: return .0;
case 5: return 1.0;
default: return .0;
}
}
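//Each option selects one problem variant for the thermal task: l() gives the
//domain length, u_x_0 the initial condition, and u_0_t / u_l_t the boundary
//values at x = 0 and x = l.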
int main(int argc, char *argv[])
{
int rank;
int size;
double dt, dx;
unsigned int N, k;
double T;
int option;
double *u;