Actual source code: ex81.c
1: #include <petsc.h>
3: static char help[] = "Solves a linear system with a MatNest and nested fields.\n\n";
5: #define Q 5 /* everything is hardwired for a 5x5 MatNest for now */
/*
 * Build a Q x Q (Q = 5) MatNest whose blocks mix matrix types (AIJ, BAIJ,
 * SBAIJ, dense, and MatHermitianTranspose wrappers), then solve a linear
 * system with it using PCFIELDSPLIT on user-merged groups of the nest's
 * row index sets.
 *
 * The `array` buffer is the row-major Q x Q block table handed to
 * MatCreateNest: block (r,c) lives at array[r*Q + c], so array[(Q+1)*i]
 * is diagonal block (i,i), array[i] is block (0,i), and array[Q*i] is
 * block (i,0).
 *
 * NOTE(review): this listing is a documentation dump -- the "N:" prefixes
 * are the original file's own line numbers, and no PETSc error checking
 * (PetscCall/CHKERRQ) appears; some lines (e.g. 25, 28) are missing from
 * the dump, presumably the validation that the -outer_fieldsplit_sizes
 * entries sum to Q.
 */
7: int main(int argc,char **args)
8: {
9: KSP ksp;
10: PC pc;
11: Mat array[Q*Q],A,a;
12: Vec b,x,sub;
13: IS rows[Q];
14: PetscInt i,j,*outer,M,N,found=Q;
15: PetscMPIInt size;
16: PetscBool flg;
17: PetscRandom rctx;
19: PetscInitialize(&argc,&args,NULL,help);
20: MPI_Comm_size(PETSC_COMM_WORLD,&size);
/* -outer_fieldsplit_sizes lists how many consecutive nest fields are merged
   into each outer fieldsplit (entries are expected to sum to Q) */
21: PetscMalloc1(found,&outer);
22: PetscOptionsGetIntArray(NULL,NULL,"-outer_fieldsplit_sizes",outer,&found,&flg);
23: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
24: if (flg) {
/* total number of fields covered by the requested splits; the dump omits
   the line(s) that presumably checked j == Q */
26: j = 0;
27: for (i=0; i<found; ++i) j += outer[i];
29: }
30: KSPCreate(PETSC_COMM_WORLD,&ksp);
/* per-field global size: starts at max(3, #ranks) and doubles each field,
   so every diagonal block is square but the fields have different sizes */
31: size = PetscMax(3,size);
32: for (i=0; i<Q*Q; ++i) array[i] = NULL;
/* diagonal blocks (i,i): AIJ for field 0, SBAIJ(bs=2) for fields 1 and 3,
   BAIJ(bs=2) for fields 2 and 4; each assembled empty then shifted to
   (100+i+1)*I so it is nonsingular */
33: for (i=0; i<Q; ++i) {
34: if (i == 0) {
35: MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,size,size,1,NULL,0,NULL,array+(Q+1)*i);
36: } else if (i == 1 || i == 3) {
37: MatCreateSBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,size,size,1,NULL,0,NULL,array+(Q+1)*i);
38: } else if (i == 2 || i == 4) {
39: MatCreateBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,size,size,1,NULL,0,NULL,array+(Q+1)*i);
40: }
41: MatAssemblyBegin(array[(Q+1)*i],MAT_FINAL_ASSEMBLY);
42: MatAssemblyEnd(array[(Q+1)*i],MAT_FINAL_ASSEMBLY);
43: MatShift(array[(Q+1)*i],100+i+1);
/* field 3's diagonal is replaced by a MatHermitianTranspose wrapper around
   a copy, so the nest also exercises implicit-transpose blocks */
44: if (i == 3) {
45: MatDuplicate(array[(Q+1)*i],MAT_COPY_VALUES,&a);
46: MatDestroy(array+(Q+1)*i);
47: MatCreateHermitianTranspose(a,array+(Q+1)*i);
48: MatDestroy(&a);
49: }
50: size *= 2;
51: }
/* first block row: random rectangular blocks (0,i) for i = 2..Q-1; the
   i == 3 slot is created with swapped dimensions because it is immediately
   re-wrapped as a Hermitian transpose below; the last column is dense */
52: MatGetSize(array[0],&M,NULL);
53: for (i=2; i<Q; ++i) {
54: MatGetSize(array[(Q+1)*i],NULL,&N);
55: if (i != Q-1) {
56: MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,i==3?N:M,i==3?M:N,0,NULL,0,NULL,array+i);
57: } else {
58: MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,M,N,NULL,array+i);
59: }
60: MatAssemblyBegin(array[i],MAT_FINAL_ASSEMBLY);
61: MatAssemblyEnd(array[i],MAT_FINAL_ASSEMBLY);
62: MatSetRandom(array[i],rctx);
63: if (i == 3) {
64: MatDuplicate(array[i],MAT_COPY_VALUES,&a);
65: MatDestroy(array+i);
66: MatCreateHermitianTranspose(a,array+i);
67: MatDestroy(&a);
68: }
69: }
/* first block column: random blocks (i,0) for even i = 2, 4; the last-row
   block is dense, built with swapped dimensions, then wrapped as a
   Hermitian transpose */
70: MatGetSize(array[0],NULL,&N);
71: for (i=2; i<Q; i+=2) {
72: MatGetSize(array[(Q+1)*i],&M,NULL);
73: if (i != Q-1) {
74: MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,M,N,2,NULL,2,NULL,array+Q*i);
75: } else {
76: MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,M,NULL,array+Q*i);
77: }
78: MatAssemblyBegin(array[Q*i],MAT_FINAL_ASSEMBLY);
79: MatAssemblyEnd(array[Q*i],MAT_FINAL_ASSEMBLY);
80: MatSetRandom(array[Q*i],rctx);
81: if (i == Q-1) {
82: MatDuplicate(array[Q*i],MAT_COPY_VALUES,&a);
83: MatDestroy(array+Q*i);
84: MatCreateHermitianTranspose(a,array+Q*i);
85: MatDestroy(&a);
86: }
87: }
/* block row 3: random blocks (3,1) and (3,2) sized against the field-3
   row dimension */
88: MatGetSize(array[(Q+1)*3],&M,NULL);
89: for (i=1; i<3; ++i) {
90: MatGetSize(array[(Q+1)*i],NULL,&N);
91: MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,M,N,2,NULL,2,NULL,array+Q*3+i);
92: MatAssemblyBegin(array[Q*3+i],MAT_FINAL_ASSEMBLY);
93: MatAssemblyEnd(array[Q*3+i],MAT_FINAL_ASSEMBLY);
94: MatSetRandom(array[Q*3+i],rctx);
95: }
/* block (1,Q-1): Hermitian transpose of an empty BAIJ(bs=2); and the
   (Q-1,Q-1) diagonal block created earlier is destroyed here, so the nest's
   last diagonal slot is NULL (an empty block) */
96: MatGetSize(array[(Q+1)*1],NULL,&N);
97: MatGetSize(array[(Q+1)*(Q-1)],&M,NULL);
98: MatCreateBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,M,N,0,NULL,0,NULL,&a);
99: MatAssemblyBegin(a,MAT_FINAL_ASSEMBLY);
100: MatAssemblyEnd(a,MAT_FINAL_ASSEMBLY);
101: MatCreateHermitianTranspose(a,array+Q+Q-1);
102: MatDestroy(&a);
103: MatDestroy(array+Q*Q-1);
/* MatCreateNest takes its own references to the blocks, so every local
   reference created above is dropped right after */
104: MatCreateNest(PETSC_COMM_WORLD,Q,NULL,Q,NULL,array,&A);
105: for (i=0; i<Q; ++i) {
106: MatDestroy(array+(Q+1)*i);
107: }
108: for (i=2; i<Q; ++i) {
109: MatDestroy(array+i);
110: MatDestroy(array+Q*i);
111: }
112: for (i=1; i<3; ++i) {
113: MatDestroy(array+Q*3+i);
114: }
115: MatDestroy(array+Q+Q-1);
116: KSPSetOperators(ksp,A,A);
117: MatNestGetISs(A,rows,NULL);
118: KSPGetPC(ksp,&pc);
119: PCSetType(pc,PCFIELDSPLIT);
/* build each outer split as the union (ISExpand) of outer[j] consecutive
   nest row index sets, and register it with the fieldsplit PC */
120: M = 0;
121: for (j=0; j<found; ++j) {
122: IS expand1,expand2;
123: ISDuplicate(rows[M],&expand1);
124: for (i=1; i<outer[j]; ++i) {
125: ISExpand(expand1,rows[M+i],&expand2);
126: ISDestroy(&expand1);
127: expand1 = expand2;
128: }
129: M += outer[j];
130: PCFieldSplitSetIS(pc,NULL,expand1);
131: ISDestroy(&expand1);
132: }
133: KSPSetFromOptions(ksp);
/* -test_matmult: round-trip the nest through dense/AIJ/HermitianTranspose
   conversions (MAT_INITIAL, MAT_REUSE, MAT_INPLACE paths) and verify each
   converted operator multiplies identically to the nest.
   NOTE(review): the MatMultEqual results overwrite flg without being
   checked here -- presumably the dump dropped the failure-reporting lines
   (note the gaps in the original numbering, e.g. 140, 144). */
134: flg = PETSC_FALSE;
135: PetscOptionsGetBool(NULL,NULL,"-test_matmult",&flg,NULL);
136: if (flg) {
137: Mat D, E, F, H;
138: MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&D);
139: MatMultEqual(A,D,10,&flg);
141: MatZeroEntries(D);
142: MatConvert(A,MATDENSE,MAT_REUSE_MATRIX,&D);
143: MatMultEqual(A,D,10,&flg);
145: MatConvert(D,MATAIJ,MAT_INITIAL_MATRIX,&E);
146: MatMultEqual(E,D,10,&flg);
148: MatZeroEntries(E);
149: MatConvert(D,MATAIJ,MAT_REUSE_MATRIX,&E);
150: MatMultEqual(E,D,10,&flg);
152: MatConvert(E,MATDENSE,MAT_INPLACE_MATRIX,&E);
153: MatMultEqual(A,E,10,&flg);
155: MatConvert(D,MATAIJ,MAT_INPLACE_MATRIX,&D);
156: MatMultEqual(A,D,10,&flg);
158: MatDestroy(&E);
159: MatCreateHermitianTranspose(D,&E);
160: MatConvert(E,MATAIJ,MAT_INITIAL_MATRIX,&F);
161: MatMultEqual(E,F,10,&flg);
163: MatZeroEntries(F);
164: MatConvert(E,MATAIJ,MAT_REUSE_MATRIX,&F);
165: MatMultEqual(E,F,10,&flg);
167: MatDestroy(&F);
168: MatConvert(E,MATAIJ,MAT_INPLACE_MATRIX,&E);
169: MatCreateHermitianTranspose(D,&H);
170: MatMultEqual(E,H,10,&flg);
172: MatDestroy(&H);
173: MatDestroy(&E);
174: MatDestroy(&D);
175: }
176: KSPSetUp(ksp);
177: MatCreateVecs(A,&b,&x);
178: VecSetRandom(b,rctx);
/* zero the RHS on the last field -- its diagonal block was removed above,
   so a nonzero RHS there would make the system inconsistent */
179: VecGetSubVector(b,rows[Q-1],&sub);
180: VecSet(sub,0.0);
181: VecRestoreSubVector(b,rows[Q-1],&sub);
182: KSPSolve(ksp,b,x);
183: VecDestroy(&b);
184: VecDestroy(&x);
185: PetscRandomDestroy(&rctx);
186: MatDestroy(&A);
187: KSPDestroy(&ksp);
188: PetscFree(outer);
189: PetscFinalize();
190: return 0;
191: }
193: /*TEST
195: test:
196: nsize: {{1 3}shared output}
197: suffix: wo_explicit_schur
198: filter: sed -e "s/seq/mpi/g" -e "s/CONVERGED_ATOL/CONVERGED_RTOL/g" -e "s/ 1 MPI processes/ 3 MPI processes/g" -e "s/iterations [4-5]/iterations 4/g"
199: args: -outer_fieldsplit_sizes {{1,2,2 2,1,2 2,2,1}separate output} -ksp_view_mat -pc_type fieldsplit -ksp_converged_reason -fieldsplit_pc_type jacobi -test_matmult
201: test:
202: nsize: {{1 3}shared output}
203: suffix: w_explicit_schur
204: filter: sed -e "s/seq/mpi/g" -e "s/CONVERGED_ATOL/CONVERGED_RTOL/g" -e "s/ 1 MPI processes/ 3 MPI processes/g" -e "s/iterations [1-2]/iterations 1/g"
205: args: -outer_fieldsplit_sizes {{1,4 2,3 3,2 4,1}separate output} -ksp_view_mat -pc_type fieldsplit -ksp_converged_reason -fieldsplit_pc_type jacobi -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full
207: TEST*/