Actual source code: ex38.c
/*
   mpiexec -n 8 ./ex38 -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -ksp_monitor -n1 64 -n2 64

   Contributed by Jie Chen for testing the flexible BiCGStab algorithm
*/
static char help[] = "Solves the PDE (in 2D) -laplacian(u) + gamma x dot grad(u) + beta u = 1\n\
with zero Dirichlet condition. The discretization is the standard centered\n\
difference. Input parameters include:\n\
  -n1    : number of mesh points in 1st dimension (default 64)\n\
  -n2    : number of mesh points in 2nd dimension (default 64)\n\
  -h     : spacing between mesh points (default 1/n1)\n\
  -gamma : gamma (default 4/h)\n\
  -beta  : beta (default 0.01/h^2)\n\n";
/*
   Include "petscksp.h" so that we can use KSP solvers. Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec           x,b,u;          /* approx solution, RHS, working vector */
  Mat           A;              /* linear system matrix */
  KSP           ksp;            /* linear solver context */
  PetscInt      n1, n2;         /* parameters */
  PetscReal     h, gamma, beta; /* parameters */
  PetscInt      i,j,Ii,J,Istart,Iend;
  PetscScalar   v, co1, co2;
#if defined(PETSC_USE_LOG)
  PetscLogStage stage;
#endif
  PetscInitialize(&argc,&args,(char*)0,help);
  n1 = 64;
  n2 = 64;
  PetscOptionsGetInt(NULL,NULL,"-n1",&n1,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n2",&n2,NULL);

  h     = 1.0/n1;
  gamma = 4.0;
  beta  = 0.01;
  PetscOptionsGetReal(NULL,NULL,"-h",&h,NULL);
  PetscOptionsGetReal(NULL,NULL,"-gamma",&gamma,NULL);
  PetscOptionsGetReal(NULL,NULL,"-beta",&beta,NULL);
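  /*
     Descriptive note (comment added for clarity): the scaling below runs
     after the options are read, so the effective defaults are gamma = 4/h
     and beta = 0.01/h^2, matching the help string; user-supplied -gamma
     and -beta values are scaled the same way.
  */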
  gamma = gamma/h;
  beta  = beta/(h*h);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and set right-hand-side vector.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance. See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n1*n2,n1*n2);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSetUp(A);
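  /*
     Usage note (added comment; assumes only standard PETSc options):
     because MatSetFromOptions() is called, the storage format can be
     selected at runtime, e.g.

       mpiexec -n 8 ./ex38 -mat_type mpiaij

     The two preallocation calls above coexist safely because each is
     ignored when the matrix is not of the matching type.
  */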
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors. Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
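  /*
     Descriptive comment (added for clarity): co1 and co2 fold the mesh
     spacing h into the stencil coefficients. Multiplying the PDE by h^2
     gives co1-scaled convection contributions on the off-diagonal entries
     and a co2 shift from the reaction term on the diagonal.
  */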
  co1 = gamma * h * h / 2.0;
  co2 = beta * h * h;
  for (Ii=Istart; Ii<Iend; Ii++) {
    i = Ii/n2; j = Ii - i*n2;
    if (i>0) {
      J = Ii - n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (i<n1-1) {
      J = Ii + n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j>0) {
      J = Ii - 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j<n2-1) {
      J = Ii + 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    v = 4.0 + co2;
    MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
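  /*
     Illustrative placement note (added comment): any computation that
     does not touch A, e.g. the vector setup done further below, could be
     moved here to overlap with the communication started by
     MatAssemblyBegin().
  */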
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes(), and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly. PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&b);
  VecSetSizes(b,PETSC_DECIDE,n1*n2);
  VecSetFromOptions(b);
  VecDuplicate(b,&x);
  VecDuplicate(b,&u);
  /*
     Set right-hand side.
  */
  VecSet(b,1.0);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);
  /*
     Set linear solver defaults for this problem (optional).
      - By extracting the KSP and PC contexts from the KSP context,
        we can then directly call any KSP and PC routines to set
        various options.
  */
  KSPSetTolerances(ksp,1.e-6,PETSC_DEFAULT,PETSC_DEFAULT,200);
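  /*
     For instance, the PC context could be pulled out and configured
     directly (a sketch of the approach the comment above describes; this
     example relies on -pc_type from the command line instead):

       PC pc;
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCBJACOBI);
  */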
  /*
     Set runtime options, e.g.,
       -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                        Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);
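  /*
     Illustrative check (added sketch, using standard PETSc calls): the
     otherwise-idle working vector u can hold the true residual b - A*x
     after the solve.
  */
  {
    PetscReal norm;
    MatMult(A,x,u);
    VecAYPX(u,-1.0,b);   /* u = b - A*x */
    VecNorm(u,NORM_2,&norm);
    PetscPrintf(PETSC_COMM_WORLD,"True residual norm %g\n",(double)norm);
  }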
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                              Clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&A);
  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscFinalize();
  return 0;
}
/*TEST

   test:
      nsize: 8
      args: -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -n1 64 -n2 64

   test:
      suffix: 2
      nsize: 8
      args: -ksp_type qmrcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -n1 64 -n2 64
      output_file: output/ex38_1.out

TEST*/