static char help[] = "Solves the constant-coefficient 1D heat equation \n\
with an Implicit Runge-Kutta method using MatKAIJ.                  \n\
                                                                    \n\
    du      d^2 u                                                   \n\
    --  = a -----  ;  0 <= x <= 1;                                  \n\
    dt      dx^2                                                    \n\
                                                                    \n\
  with periodic boundary conditions                                 \n\
                                                                    \n\
  2nd order central discretization in space:                        \n\
                                                                    \n\
    [ d^2 u ]     u_{i+1} - 2u_i + u_{i-1}                          \n\
    [ ----- ]  =  ------------------------                          \n\
    [ dx^2  ]i              h^2                                     \n\
                                                                    \n\
    i = grid index;   h = x_{i+1}-x_i (uniform)                     \n\
    0 <= i < n        h = 1.0/n                                     \n\
                                                                    \n\
  Thus,                                                             \n\
                                                                    \n\
    du                                                              \n\
    --  = Ju;   J = (a/h^2) tridiagonal(1,-2,1)_n                   \n\
    dt                                                              \n\
                                                                    \n\
  Implicit Runge-Kutta method:                                      \n\
                                                                    \n\
    U^(k)   = u^n + dt \\sum_i a_{ki} JU^{i}                        \n\
    u^{n+1} = u^n + dt \\sum_i b_i JU^{i}                           \n\
                                                                    \n\
    i = 1,...,s (s -> number of stages)                             \n\
                                                                    \n\
  At each time step, we solve                                       \n\
                                                                    \n\
    [  1                                  ]       1                 \n\
    [ -- I \\otimes A^{-1} - J \\otimes I ] U  =  -- u^n \\otimes A^{-1} \n\
    [ dt                                  ]       dt                \n\
                                                                    \n\
  where A is the Butcher tableau of the implicit                    \n\
  Runge-Kutta method,                                               \n\
                                                                    \n\
  with MATKAIJ and KSP.                                             \n\
                                                                    \n\
Available IRK Methods:                                              \n\
  gauss       n-stage Gauss method                                  \n\
\n";
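
/*
  Example usage (a sketch; the executable name ex74 and the option values are
  illustrative, taken from the test cases at the bottom of this file):

    ./ex74 -a 0.1 -dt 0.125 -niter 5 -imax 40 -irk_type gauss -irk_nstages 2 -pc_type pbjacobi -ksp_atol 1e-6
*/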

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
    petscsys.h    - base PETSc routines
    petscvec.h    - vectors
    petscmat.h    - matrices
    petscis.h     - index sets
    petscviewer.h - viewers
    petscpc.h     - preconditioners
*/
#include <petscksp.h>
#include <petscdt.h>

/* define the IRK methods available */
#define IRKGAUSS "gauss"

typedef enum {
  PHYSICS_DIFFUSION,
  PHYSICS_ADVECTION
} PhysicsType;
const char *const PhysicsTypes[] = {"DIFFUSION","ADVECTION","PhysicsType","PHYSICS_",NULL};

typedef struct __context__ {
  PetscReal   a;         /* advection velocity or diffusion coefficient */
  PetscReal   xmin,xmax; /* domain bounds */
  PetscInt    imax;      /* number of grid points */
  PetscInt    niter;     /* number of time iterations */
  PetscReal   dt;        /* time step size */
  PhysicsType physics_type;
} UserContext;

static PetscErrorCode ExactSolution(Vec,void*,PetscReal);
static PetscErrorCode RKCreate_Gauss(PetscInt,PetscScalar**,PetscScalar**,PetscReal**);
static PetscErrorCode Assemble_AdvDiff(MPI_Comm,UserContext*,Mat*);

#include <petsc/private/kernels/blockinvert.h>

int main(int argc, char **argv)
{
  Vec               u,uex,rhs,z;
  UserContext       ctxt;
  PetscInt          nstages,matis,matie;
  PetscInt          n,i,s,t,total_its;
  PetscScalar       *A,*B,*At,*b,one = 1.0;
  PetscReal         *c,err,time;
  Mat               Identity,J,TA,SC,R;
  KSP               ksp;
  PetscFunctionList IRKList = NULL;
  char              irktype[256] = IRKGAUSS;

  PetscInitialize(&argc,&argv,(char*)0,help);
  PetscFunctionListAdd(&IRKList,IRKGAUSS,RKCreate_Gauss);

  /* default values */
  ctxt.a            = 1.0;
  ctxt.xmin         = 0.0;
  ctxt.xmax         = 1.0;
  ctxt.imax         = 20;
  ctxt.niter        = 0;
  ctxt.dt           = 0.0;
  ctxt.physics_type = PHYSICS_DIFFUSION;

  PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"IRK options","");
  PetscOptionsReal ("-a","advection velocity or diffusion coefficient","<1.0>",ctxt.a,&ctxt.a,NULL);
  PetscOptionsInt  ("-imax","grid size","<20>",ctxt.imax,&ctxt.imax,NULL);
  PetscOptionsReal ("-xmin","xmin","<0.0>",ctxt.xmin,&ctxt.xmin,NULL);
  PetscOptionsReal ("-xmax","xmax","<1.0>",ctxt.xmax,&ctxt.xmax,NULL);
  PetscOptionsInt  ("-niter","number of time steps","<0>",ctxt.niter,&ctxt.niter,NULL);
  PetscOptionsReal ("-dt","time step size","<0.0>",ctxt.dt,&ctxt.dt,NULL);
  PetscOptionsFList("-irk_type","IRK method family","",IRKList,irktype,irktype,sizeof(irktype),NULL);
  nstages = 2;
  PetscOptionsInt  ("-irk_nstages","Number of stages in IRK method","",nstages,&nstages,NULL);
  PetscOptionsEnum ("-physics_type","Type of process to discretize","",PhysicsTypes,(PetscEnum)ctxt.physics_type,(PetscEnum*)&ctxt.physics_type,NULL);
  PetscOptionsEnd();
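
  /* Note: with the defaults above a run does nothing useful; -niter must be
     positive for the time loop to execute, and -dt must be nonzero or the
     1/dt scaling of A^{-1} below is a division by zero. */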

  /* allocate and initialize solution vector and exact solution */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,ctxt.imax);
  VecSetFromOptions(u);
  VecDuplicate(u,&uex);
  /* initial solution */
  ExactSolution(u,&ctxt,0.0);
  /* exact solution at the final time */
  ExactSolution(uex,&ctxt,ctxt.dt*ctxt.niter);

  { /* Create A,b,c */
    PetscErrorCode (*irkcreate)(PetscInt,PetscScalar**,PetscScalar**,PetscReal**);
    PetscFunctionListFind(IRKList,irktype,&irkcreate);
    (*irkcreate)(nstages,&A,&b,&c);
  }
  { /* Invert A */
    /* PETSc does not provide a routine to calculate the inverse of a general
       matrix. To get the inverse of A, we form a sequential BAIJ matrix from
       it, consisting of a single block with block size equal to the dimension
       of A, and then use MatInvertBlockDiagonal(). */
    Mat                A_baij;
    PetscInt           idxm[1]={0},idxn[1]={0};
    const PetscScalar *A_inv;

    MatCreateSeqBAIJ(PETSC_COMM_SELF,nstages,nstages,nstages,1,NULL,&A_baij);
    MatSetOption(A_baij,MAT_ROW_ORIENTED,PETSC_FALSE);
    MatSetValuesBlocked(A_baij,1,idxm,1,idxn,A,INSERT_VALUES);
    MatAssemblyBegin(A_baij,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A_baij,MAT_FINAL_ASSEMBLY);
    MatInvertBlockDiagonal(A_baij,&A_inv);
    PetscMemcpy(A,A_inv,nstages*nstages*sizeof(PetscScalar));
    MatDestroy(&A_baij);
  }
  /* Scale (1/dt)*A^{-1} and (-dt)*b. The spatial matrix assembled below is
     the negative of the true Jacobian, so the step-completion weights b are
     scaled by -dt rather than dt. */
  for (s=0; s<nstages*nstages; s++) A[s] *= 1.0/ctxt.dt;
  for (s=0; s<nstages; s++) b[s] *= (-ctxt.dt);

  /* Compute the row sums At of (1/dt)*A^{-1} and the s x s identity B */
  PetscMalloc2(nstages,&At,PetscSqr(nstages),&B);
  for (s=0; s<nstages; s++) {
    At[s] = 0;
    for (t=0; t<nstages; t++) {
      At[s] += A[s+nstages*t];      /* row sum of (1/dt)*A^{-1}; A is stored column-major */
      B[s+nstages*t] = 1.*(s == t); /* s x s identity */
    }
  }

  /* allocate and calculate the (-J) matrix */
  switch (ctxt.physics_type) {
  case PHYSICS_ADVECTION:
  case PHYSICS_DIFFUSION:
    Assemble_AdvDiff(PETSC_COMM_WORLD,&ctxt,&J);
  }
  MatCreate(PETSC_COMM_WORLD,&Identity);
  MatSetType(Identity,MATAIJ);
  MatGetOwnershipRange(J,&matis,&matie);
  MatSetSizes(Identity,matie-matis,matie-matis,ctxt.imax,ctxt.imax);
  MatSetUp(Identity);
  for (i=matis; i<matie; i++) {
    MatSetValues(Identity,1,&i,1,&i,&one,INSERT_VALUES);
  }
  MatAssemblyBegin(Identity,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(Identity,MAT_FINAL_ASSEMBLY);
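
  /* A sketch of the algebra behind the three KAIJ matrices created below,
     using the documented MATKAIJ structure K = I \otimes S + M \otimes T
     for MatCreateKAIJ(M,p,q,S,T,&K), and recalling that J here stores the
     negative of the true Jacobian J_true:
       TA : I \otimes (1/dt)A^{-1} - J_true \otimes I_s     (stage system)
       SC : U -> dt \sum_k b_k J_true U^{k}                 (step completion)
       R  : u^n -> row sums of (1/dt)A^{-1} applied to u^n  (right-hand side) */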

  /* Create the KAIJ matrix for solving the stages */
  MatCreateKAIJ(J,nstages,nstages,A,B,&TA);

  /* Create the KAIJ matrix for step completion */
  MatCreateKAIJ(J,1,nstages,NULL,b,&SC);

  /* Create the KAIJ matrix that forms the right-hand side of the stage solve */
  MatCreateKAIJ(Identity,nstages,1,NULL,At,&R);

  /* Create and set options for KSP */
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  KSPSetOperators(ksp,TA,TA);
  KSPSetFromOptions(ksp);

  /* Allocate work and right-hand-side vectors */
  VecCreate(PETSC_COMM_WORLD,&z);
  VecSetFromOptions(z);
  VecSetSizes(z,PETSC_DECIDE,ctxt.imax*nstages);
  VecDuplicate(z,&rhs);

  /* iterate in time */
  for (n=0,time=0.,total_its=0; n<ctxt.niter; n++) {
    PetscInt its;

    /* compute and set the right-hand side */
    MatMult(R,u,rhs);

    /* solve the stage system */
    KSPSolve(ksp,rhs,z);
    KSPGetIterationNumber(ksp,&its);
    total_its += its;

    /* update the solution */
    MatMultAdd(SC,z,u,u);

    /* time step complete */
    time += ctxt.dt;
  }

  /* Deallocate work and right-hand-side vectors */
  VecDestroy(&z);
  VecDestroy(&rhs);

  /* Calculate the error in the final solution */
  VecAYPX(uex,-1.0,u);
  VecNorm(uex,NORM_2,&err);
  err = PetscSqrtReal(err*err/((PetscReal)ctxt.imax));
  PetscPrintf(PETSC_COMM_WORLD,"L2 norm of the numerical error = %g (time=%g)\n",(double)err,(double)time);
  PetscPrintf(PETSC_COMM_WORLD,"Number of time steps: %D (%D Krylov iterations)\n",ctxt.niter,total_its);

  /* Free up memory */
  KSPDestroy(&ksp);
  MatDestroy(&TA);
  MatDestroy(&SC);
  MatDestroy(&R);
  MatDestroy(&J);
  MatDestroy(&Identity);
  PetscFree3(A,b,c);
  PetscFree2(At,B);
  VecDestroy(&uex);
  VecDestroy(&u);
  PetscFunctionListDestroy(&IRKList);

  PetscFinalize();
  return 0;
}

PetscErrorCode ExactSolution(Vec u,void *c,PetscReal t)
{
  UserContext *ctxt = (UserContext*) c;
  PetscInt     i,is,ie;
  PetscScalar *uarr;
  PetscReal    x,dx,a=ctxt->a,pi=PETSC_PI;

  dx = (ctxt->xmax - ctxt->xmin)/((PetscReal) ctxt->imax);
  VecGetOwnershipRange(u,&is,&ie);
  VecGetArray(u,&uarr);
  for (i=is; i<ie; i++) {
    x = i * dx;
    switch (ctxt->physics_type) {
      case PHYSICS_DIFFUSION:
        uarr[i-is] = PetscExpScalar(-4.0*pi*pi*a*t)*PetscSinScalar(2*pi*x);
        break;
      case PHYSICS_ADVECTION:
        uarr[i-is] = PetscSinScalar(2*pi*(x - a*t));
        break;
      default:
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for physics type %s",PhysicsTypes[ctxt->physics_type]);
    }
  }
  VecRestoreArray(u,&uarr);
  return 0;
}

/* Arrays should be freed with PetscFree3(A,b,c) */
static PetscErrorCode RKCreate_Gauss(PetscInt nstages,PetscScalar **gauss_A,PetscScalar **gauss_b,PetscReal **gauss_c)
{
  PetscScalar *A,*G0,*G1;
  PetscReal   *b,*c;
  PetscInt     i,j;
  Mat          G0mat,G1mat,Amat;

  PetscMalloc3(PetscSqr(nstages),&A,nstages,gauss_b,nstages,&c);
  PetscMalloc3(nstages,&b,PetscSqr(nstages),&G0,PetscSqr(nstages),&G1);
  PetscDTGaussQuadrature(nstages,0.,1.,c,b);
  for (i=0; i<nstages; i++) (*gauss_b)[i] = b[i]; /* copy to possibly-complex array */

  /* A^T = G0^{-1} G1 */
  for (i=0; i<nstages; i++) {
    for (j=0; j<nstages; j++) {
      G0[i*nstages+j] = PetscPowRealInt(c[i],j);
      G1[i*nstages+j] = PetscPowRealInt(c[i],j+1)/(j+1);
    }
  }
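
  /* A note on the construction (under the collocation view of Gauss methods):
     the coefficients a_{ij} satisfy the order conditions
       \sum_j a_{ij} c_j^k = c_i^{k+1}/(k+1),   k = 0,...,s-1,
     i.e. A G0 = G1 for the row-major arrays filled above; the dense solve
     below works with their column-major (transposed) views, hence A^T. */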
  /* The arrays above are row-aligned, but we create dense matrices as the transpose */
  MatCreateSeqDense(PETSC_COMM_SELF,nstages,nstages,G0,&G0mat);
  MatCreateSeqDense(PETSC_COMM_SELF,nstages,nstages,G1,&G1mat);
  MatCreateSeqDense(PETSC_COMM_SELF,nstages,nstages,A,&Amat);
  MatLUFactor(G0mat,NULL,NULL,NULL);
  MatMatSolve(G0mat,G1mat,Amat);
  MatTranspose(Amat,MAT_INPLACE_MATRIX,&Amat);

  MatDestroy(&G0mat);
  MatDestroy(&G1mat);
  MatDestroy(&Amat);
  PetscFree3(b,G0,G1);
  *gauss_A = A;
  *gauss_c = c;
  return 0;
}

static PetscErrorCode Assemble_AdvDiff(MPI_Comm comm,UserContext *user,Mat *J)
{
  PetscInt  matis,matie,i;
  PetscReal dx,dx2;
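
  /* Note: this routine assembles the negative of the discrete operator, i.e.
     -J for du/dt = Ju; main() relies on that sign convention both when
     building the KAIJ matrices and in the -dt scaling of b. */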
  dx = (user->xmax - user->xmin)/((PetscReal)user->imax); dx2 = dx*dx;
  MatCreate(comm,J);
  MatSetType(*J,MATAIJ);
  MatSetSizes(*J,PETSC_DECIDE,PETSC_DECIDE,user->imax,user->imax);
  MatSetUp(*J);
  MatGetOwnershipRange(*J,&matis,&matie);
  for (i=matis; i<matie; i++) {
    PetscScalar values[3];
    PetscInt    col[3];
    switch (user->physics_type) {
      case PHYSICS_DIFFUSION:
        values[0] = -user->a*1.0/dx2;
        values[1] = user->a*2.0/dx2;
        values[2] = -user->a*1.0/dx2;
        break;
      case PHYSICS_ADVECTION:
        values[0] = -user->a*.5/dx;
        values[1] = 0.;
        values[2] = user->a*.5/dx;
        break;
      default:
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for physics type %s",PhysicsTypes[user->physics_type]);
    }
    /* periodic boundaries */
    if (i == 0) {
      col[0] = user->imax-1;
      col[1] = i;
      col[2] = i+1;
    } else if (i == user->imax-1) {
      col[0] = i-1;
      col[1] = i;
      col[2] = 0;
    } else {
      col[0] = i-1;
      col[1] = i;
      col[2] = i+1;
    }
    MatSetValues(*J,1,&i,3,col,values,INSERT_VALUES);
  }
  MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);
  return 0;
}

/*TEST

 testset:
   suffix: 1
   args: -a 0.1 -dt .125 -niter 5 -imax 40 -ksp_monitor_short -pc_type pbjacobi -irk_type gauss -irk_nstages 2
   test:
     args: -ksp_atol 1e-6
   test:
     requires: hpddm !single
     suffix: hpddm
     output_file: output/ex74_1.out
     args: -ksp_atol 1e-6 -ksp_type hpddm
   test:
     requires: hpddm
     suffix: hpddm_gcrodr
     output_file: output/ex74_1_hpddm.out
     args: -ksp_atol 1e-4 -ksp_view_final_residual -ksp_type hpddm -ksp_hpddm_type gcrodr -ksp_hpddm_recycle 2

 test:
   suffix: 2
   args: -a 0.1 -dt .125 -niter 5 -imax 40 -ksp_monitor_short -pc_type pbjacobi -ksp_atol 1e-6 -irk_type gauss -irk_nstages 4 -ksp_gmres_restart 100

 testset:
   suffix: 3
   requires: !single
   args: -a 1 -dt .33 -niter 3 -imax 40 -ksp_monitor_short -pc_type pbjacobi -ksp_atol 1e-6 -irk_type gauss -irk_nstages 4 -ksp_gmres_restart 100 -physics_type advection
   test:
     args:
   test:
     requires: hpddm
     suffix: hpddm
     output_file: output/ex74_3.out
     args: -ksp_type hpddm
   test:
     requires: hpddm
     suffix: hpddm_gcrodr
     output_file: output/ex74_3_hpddm.out
     args: -ksp_view_final_residual -ksp_type hpddm -ksp_hpddm_type gcrodr -ksp_hpddm_recycle 5

TEST*/